ngram
listlengths
0
67.8k
[ "= {} for symbol, mean in self.symbolmeans.items(): if random.random() < 0.2: quotes[symbol] =", "mean in self.symbolmeans.items(): if random.random() < 0.2: quotes[symbol] = round(random.normalvariate(mean, 20), 2) for", "generate(self): quotes = {} for symbol, mean in self.symbolmeans.items(): if random.random() < 0.2:", "<filename>examples/stockquotes-old/phase1/stockmarket.py import random class StockMarket(object): def __init__(self, marketname, symbols): self.name = marketname self.symbolmeans", "for symbol in symbols: self.symbolmeans[symbol] = random.uniform(20, 200) self.aggregators = [] def generate(self):", "aggregator in self.aggregators: aggregator.quotes(self.name, quotes) def listener(self, aggregator): self.aggregators.append(aggregator) def symbols(self): return self.symbolmeans.keys()", "= round(random.normalvariate(mean, 20), 2) for aggregator in self.aggregators: aggregator.quotes(self.name, quotes) def listener(self, aggregator):", "for symbol, mean in self.symbolmeans.items(): if random.random() < 0.2: quotes[symbol] = round(random.normalvariate(mean, 20),", "2) for aggregator in self.aggregators: aggregator.quotes(self.name, quotes) def listener(self, aggregator): self.aggregators.append(aggregator) def symbols(self):", "symbol in symbols: self.symbolmeans[symbol] = random.uniform(20, 200) self.aggregators = [] def generate(self): quotes", "import random class StockMarket(object): def __init__(self, marketname, symbols): self.name = marketname self.symbolmeans =", "marketname self.symbolmeans = {} for symbol in symbols: self.symbolmeans[symbol] = random.uniform(20, 200) self.aggregators", "self.name = marketname self.symbolmeans = {} for symbol in symbols: self.symbolmeans[symbol] = random.uniform(20,", "[] def generate(self): quotes = {} for symbol, mean in self.symbolmeans.items(): if random.random()", "random class StockMarket(object): def __init__(self, marketname, symbols): self.name = marketname self.symbolmeans = {}", "if 
random.random() < 0.2: quotes[symbol] = round(random.normalvariate(mean, 20), 2) for aggregator in self.aggregators:", "def __init__(self, marketname, symbols): self.name = marketname self.symbolmeans = {} for symbol in", "marketname, symbols): self.name = marketname self.symbolmeans = {} for symbol in symbols: self.symbolmeans[symbol]", "< 0.2: quotes[symbol] = round(random.normalvariate(mean, 20), 2) for aggregator in self.aggregators: aggregator.quotes(self.name, quotes)", "= [] def generate(self): quotes = {} for symbol, mean in self.symbolmeans.items(): if", "for aggregator in self.aggregators: aggregator.quotes(self.name, quotes) def listener(self, aggregator): self.aggregators.append(aggregator) def symbols(self): return", "random.random() < 0.2: quotes[symbol] = round(random.normalvariate(mean, 20), 2) for aggregator in self.aggregators: aggregator.quotes(self.name,", "self.symbolmeans.items(): if random.random() < 0.2: quotes[symbol] = round(random.normalvariate(mean, 20), 2) for aggregator in", "StockMarket(object): def __init__(self, marketname, symbols): self.name = marketname self.symbolmeans = {} for symbol", "self.symbolmeans[symbol] = random.uniform(20, 200) self.aggregators = [] def generate(self): quotes = {} for", "{} for symbol, mean in self.symbolmeans.items(): if random.random() < 0.2: quotes[symbol] = round(random.normalvariate(mean,", "quotes[symbol] = round(random.normalvariate(mean, 20), 2) for aggregator in self.aggregators: aggregator.quotes(self.name, quotes) def listener(self,", "0.2: quotes[symbol] = round(random.normalvariate(mean, 20), 2) for aggregator in self.aggregators: aggregator.quotes(self.name, quotes) def", "def generate(self): quotes = {} for symbol, mean in self.symbolmeans.items(): if random.random() <", "__init__(self, marketname, symbols): self.name = marketname self.symbolmeans = {} for symbol in symbols:", "symbols: self.symbolmeans[symbol] = random.uniform(20, 200) self.aggregators = [] def generate(self): quotes = 
{}", "self.symbolmeans = {} for symbol in symbols: self.symbolmeans[symbol] = random.uniform(20, 200) self.aggregators =", "symbols): self.name = marketname self.symbolmeans = {} for symbol in symbols: self.symbolmeans[symbol] =", "= {} for symbol in symbols: self.symbolmeans[symbol] = random.uniform(20, 200) self.aggregators = []", "self.aggregators = [] def generate(self): quotes = {} for symbol, mean in self.symbolmeans.items():", "= random.uniform(20, 200) self.aggregators = [] def generate(self): quotes = {} for symbol,", "class StockMarket(object): def __init__(self, marketname, symbols): self.name = marketname self.symbolmeans = {} for", "in self.symbolmeans.items(): if random.random() < 0.2: quotes[symbol] = round(random.normalvariate(mean, 20), 2) for aggregator", "symbol, mean in self.symbolmeans.items(): if random.random() < 0.2: quotes[symbol] = round(random.normalvariate(mean, 20), 2)", "200) self.aggregators = [] def generate(self): quotes = {} for symbol, mean in", "round(random.normalvariate(mean, 20), 2) for aggregator in self.aggregators: aggregator.quotes(self.name, quotes) def listener(self, aggregator): self.aggregators.append(aggregator)", "random.uniform(20, 200) self.aggregators = [] def generate(self): quotes = {} for symbol, mean", "in symbols: self.symbolmeans[symbol] = random.uniform(20, 200) self.aggregators = [] def generate(self): quotes =", "{} for symbol in symbols: self.symbolmeans[symbol] = random.uniform(20, 200) self.aggregators = [] def", "quotes = {} for symbol, mean in self.symbolmeans.items(): if random.random() < 0.2: quotes[symbol]", "= marketname self.symbolmeans = {} for symbol in symbols: self.symbolmeans[symbol] = random.uniform(20, 200)", "20), 2) for aggregator in self.aggregators: aggregator.quotes(self.name, quotes) def listener(self, aggregator): self.aggregators.append(aggregator) def" ]
[ "'tests']), install_requires=['requests'], license='MIT', test_suite=\"tests\", classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience", "exec(open(os.path.join(setup_path_dir, 'seeddms', 'version.py')).read()) long_description = open(os.path.join(setup_path_dir, 'README.md')).read() setup( name='seeddms', version=__version__, description='SeedDMS REST API',", "license='MIT', test_suite=\"tests\", classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers',", "import re setup_path = os.path.abspath(__file__) setup_path_dir = os.path.dirname(setup_path) exec(open(os.path.join(setup_path_dir, 'seeddms', 'version.py')).read()) long_description =", "API', keywords='dms seeddms', long_description=long_description, author='<NAME>', author_email='<EMAIL>', url='https://github.com/jvzantvoort/seeddms', packages=find_packages(exclude=['docs', 'docs-src', 'tests']), install_requires=['requests'], license='MIT', test_suite=\"tests\",", "-*- from setuptools import setup, find_packages import io import os import re setup_path", "MIT License', 'Programming Language :: Python :: 2', 'Programming Language :: Python ::", "test_suite=\"tests\", classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Topic", "- Production/Stable', 'Intended Audience :: Developers', 'Topic :: Office/Business', 'License :: OSI Approved", "os.path.dirname(setup_path) exec(open(os.path.join(setup_path_dir, 'seeddms', 'version.py')).read()) long_description = open(os.path.join(setup_path_dir, 'README.md')).read() setup( name='seeddms', version=__version__, description='SeedDMS REST", "find_packages import io import os import re setup_path = os.path.abspath(__file__) setup_path_dir = os.path.dirname(setup_path)", "description='SeedDMS REST API', keywords='dms seeddms', long_description=long_description, author='<NAME>', author_email='<EMAIL>', url='https://github.com/jvzantvoort/seeddms', 
packages=find_packages(exclude=['docs', 'docs-src', 'tests']), install_requires=['requests'],", ":: 5 - Production/Stable', 'Intended Audience :: Developers', 'Topic :: Office/Business', 'License ::", "seeddms', long_description=long_description, author='<NAME>', author_email='<EMAIL>', url='https://github.com/jvzantvoort/seeddms', packages=find_packages(exclude=['docs', 'docs-src', 'tests']), install_requires=['requests'], license='MIT', test_suite=\"tests\", classifiers=[ 'Development", "long_description = open(os.path.join(setup_path_dir, 'README.md')).read() setup( name='seeddms', version=__version__, description='SeedDMS REST API', keywords='dms seeddms', long_description=long_description,", "'Topic :: Office/Business', 'License :: OSI Approved :: MIT License', 'Programming Language ::", "import setup, find_packages import io import os import re setup_path = os.path.abspath(__file__) setup_path_dir", "'version.py')).read()) long_description = open(os.path.join(setup_path_dir, 'README.md')).read() setup( name='seeddms', version=__version__, description='SeedDMS REST API', keywords='dms seeddms',", "os import re setup_path = os.path.abspath(__file__) setup_path_dir = os.path.dirname(setup_path) exec(open(os.path.join(setup_path_dir, 'seeddms', 'version.py')).read()) long_description", "= os.path.dirname(setup_path) exec(open(os.path.join(setup_path_dir, 'seeddms', 'version.py')).read()) long_description = open(os.path.join(setup_path_dir, 'README.md')).read() setup( name='seeddms', version=__version__, description='SeedDMS", "name='seeddms', version=__version__, description='SeedDMS REST API', keywords='dms seeddms', long_description=long_description, author='<NAME>', author_email='<EMAIL>', url='https://github.com/jvzantvoort/seeddms', packages=find_packages(exclude=['docs', 'docs-src',", "setup, find_packages import io import os import re setup_path = os.path.abspath(__file__) setup_path_dir =", "keywords='dms seeddms', long_description=long_description, 
author='<NAME>', author_email='<EMAIL>', url='https://github.com/jvzantvoort/seeddms', packages=find_packages(exclude=['docs', 'docs-src', 'tests']), install_requires=['requests'], license='MIT', test_suite=\"tests\", classifiers=[", "'Intended Audience :: Developers', 'Topic :: Office/Business', 'License :: OSI Approved :: MIT", ":: Office/Business', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python", "Approved :: MIT License', 'Programming Language :: Python :: 2', 'Programming Language ::", "'README.md')).read() setup( name='seeddms', version=__version__, description='SeedDMS REST API', keywords='dms seeddms', long_description=long_description, author='<NAME>', author_email='<EMAIL>', url='https://github.com/jvzantvoort/seeddms',", "packages=find_packages(exclude=['docs', 'docs-src', 'tests']), install_requires=['requests'], license='MIT', test_suite=\"tests\", classifiers=[ 'Development Status :: 5 - Production/Stable',", "coding: utf-8 -*- from setuptools import setup, find_packages import io import os import", "setuptools import setup, find_packages import io import os import re setup_path = os.path.abspath(__file__)", "setup_path = os.path.abspath(__file__) setup_path_dir = os.path.dirname(setup_path) exec(open(os.path.join(setup_path_dir, 'seeddms', 'version.py')).read()) long_description = open(os.path.join(setup_path_dir, 'README.md')).read()", "= os.path.abspath(__file__) setup_path_dir = os.path.dirname(setup_path) exec(open(os.path.join(setup_path_dir, 'seeddms', 'version.py')).read()) long_description = open(os.path.join(setup_path_dir, 'README.md')).read() setup(", "classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Topic ::", "REST API', keywords='dms seeddms', long_description=long_description, author='<NAME>', author_email='<EMAIL>', url='https://github.com/jvzantvoort/seeddms', packages=find_packages(exclude=['docs', 'docs-src', 'tests']), install_requires=['requests'], 
license='MIT',", "Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Topic :: Office/Business', 'License", "author='<NAME>', author_email='<EMAIL>', url='https://github.com/jvzantvoort/seeddms', packages=find_packages(exclude=['docs', 'docs-src', 'tests']), install_requires=['requests'], license='MIT', test_suite=\"tests\", classifiers=[ 'Development Status ::", "os.path.abspath(__file__) setup_path_dir = os.path.dirname(setup_path) exec(open(os.path.join(setup_path_dir, 'seeddms', 'version.py')).read()) long_description = open(os.path.join(setup_path_dir, 'README.md')).read() setup( name='seeddms',", "open(os.path.join(setup_path_dir, 'README.md')).read() setup( name='seeddms', version=__version__, description='SeedDMS REST API', keywords='dms seeddms', long_description=long_description, author='<NAME>', author_email='<EMAIL>',", "= open(os.path.join(setup_path_dir, 'README.md')).read() setup( name='seeddms', version=__version__, description='SeedDMS REST API', keywords='dms seeddms', long_description=long_description, author='<NAME>',", "OSI Approved :: MIT License', 'Programming Language :: Python :: 2', 'Programming Language", "License', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7',", "'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Topic :: Office/Business',", "5 - Production/Stable', 'Intended Audience :: Developers', 'Topic :: Office/Business', 'License :: OSI", "Office/Business', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python ::", "Audience :: Developers', 'Topic :: Office/Business', 'License :: OSI Approved :: MIT License',", "Production/Stable', 'Intended Audience :: Developers', 'Topic :: Office/Business', 'License :: OSI Approved ::", ":: MIT License', 'Programming Language :: Python :: 2', 'Programming Language :: Python", "'docs-src', 'tests']), install_requires=['requests'], license='MIT', test_suite=\"tests\", classifiers=[ 'Development 
Status :: 5 - Production/Stable', 'Intended", "# -*- coding: utf-8 -*- from setuptools import setup, find_packages import io import", "io import os import re setup_path = os.path.abspath(__file__) setup_path_dir = os.path.dirname(setup_path) exec(open(os.path.join(setup_path_dir, 'seeddms',", "re setup_path = os.path.abspath(__file__) setup_path_dir = os.path.dirname(setup_path) exec(open(os.path.join(setup_path_dir, 'seeddms', 'version.py')).read()) long_description = open(os.path.join(setup_path_dir,", "install_requires=['requests'], license='MIT', test_suite=\"tests\", classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience ::", "Language :: Python :: 2', 'Programming Language :: Python :: 2.7', ] )", "'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', ]", "version=__version__, description='SeedDMS REST API', keywords='dms seeddms', long_description=long_description, author='<NAME>', author_email='<EMAIL>', url='https://github.com/jvzantvoort/seeddms', packages=find_packages(exclude=['docs', 'docs-src', 'tests']),", "<reponame>jvzantvoort/seeddms # -*- coding: utf-8 -*- from setuptools import setup, find_packages import io", "utf-8 -*- from setuptools import setup, find_packages import io import os import re", "'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 2',", "Developers', 'Topic :: Office/Business', 'License :: OSI Approved :: MIT License', 'Programming Language", "import io import os import re setup_path = os.path.abspath(__file__) setup_path_dir = os.path.dirname(setup_path) exec(open(os.path.join(setup_path_dir,", "setup( name='seeddms', version=__version__, description='SeedDMS REST API', keywords='dms seeddms', long_description=long_description, author='<NAME>', author_email='<EMAIL>', url='https://github.com/jvzantvoort/seeddms', packages=find_packages(exclude=['docs',", "url='https://github.com/jvzantvoort/seeddms', packages=find_packages(exclude=['docs', 'docs-src', 
'tests']), install_requires=['requests'], license='MIT', test_suite=\"tests\", classifiers=[ 'Development Status :: 5 -", "author_email='<EMAIL>', url='https://github.com/jvzantvoort/seeddms', packages=find_packages(exclude=['docs', 'docs-src', 'tests']), install_requires=['requests'], license='MIT', test_suite=\"tests\", classifiers=[ 'Development Status :: 5", "'seeddms', 'version.py')).read()) long_description = open(os.path.join(setup_path_dir, 'README.md')).read() setup( name='seeddms', version=__version__, description='SeedDMS REST API', keywords='dms", ":: OSI Approved :: MIT License', 'Programming Language :: Python :: 2', 'Programming", ":: Developers', 'Topic :: Office/Business', 'License :: OSI Approved :: MIT License', 'Programming", "setup_path_dir = os.path.dirname(setup_path) exec(open(os.path.join(setup_path_dir, 'seeddms', 'version.py')).read()) long_description = open(os.path.join(setup_path_dir, 'README.md')).read() setup( name='seeddms', version=__version__,", "-*- coding: utf-8 -*- from setuptools import setup, find_packages import io import os", "from setuptools import setup, find_packages import io import os import re setup_path =", "import os import re setup_path = os.path.abspath(__file__) setup_path_dir = os.path.dirname(setup_path) exec(open(os.path.join(setup_path_dir, 'seeddms', 'version.py')).read())", "long_description=long_description, author='<NAME>', author_email='<EMAIL>', url='https://github.com/jvzantvoort/seeddms', packages=find_packages(exclude=['docs', 'docs-src', 'tests']), install_requires=['requests'], license='MIT', test_suite=\"tests\", classifiers=[ 'Development Status" ]
[ "report.update(self.previous_reports[taskid]) report.update(report_copy) def _first(self, report): if 'first' in report: return report['first'] = (report['done']", "be automatically determined if not given. taskid : immutable The unique task ID.", "self.previous_reports: self._complement(taskid, report) self._first(report) self._last(report) self._store(taskid, report.copy()) def _complement(self, taskid, report): report_copy =", "if the first report for the task. If not given, automatically determined from", "last : bool `True` if the last report for the task. If not", "= { } self.volatile_fileds = ('first', 'last') def __call__(self, report): taskid = report['taskid']", "} self.volatile_fileds = ('first', 'last') def __call__(self, report): taskid = report['taskid'] if taskid", "= report.copy() report.clear() report.update(self.previous_reports[taskid]) report.update(report_copy) def _first(self, report): if 'first' in report: return", "taskid, report): report_copy = report.copy() report.clear() report.update(self.previous_reports[taskid]) report.update(report_copy) def _first(self, report): if 'first'", "def _last(self, report): if 'last' in report: return report['last'] = (report['done'] >= report['total'])", "done so far total : int The total iterations to be done name", "for the task. If not given, automatically determined from `done`; `True` if `done`", "`done` and `total`; `True` if `done` equals `total`, `False` otherwise \"\"\" def __init__(self):", "optional The number of the iterations done so far total : int The", "determined from `done` and `total`; `True` if `done` equals `total`, `False` otherwise \"\"\"", "ID. 
done : int, optional The number of the iterations done so far", "self._last(report) self._store(taskid, report.copy()) def _complement(self, taskid, report): report_copy = report.copy() report.clear() report.update(self.previous_reports[taskid]) report.update(report_copy)", "report.copy()) def _complement(self, taskid, report): report_copy = report.copy() report.clear() report.update(self.previous_reports[taskid]) report.update(report_copy) def _first(self,", "'last') def __call__(self, report): taskid = report['taskid'] if taskid in self.previous_reports: self._complement(taskid, report)", "report): report_copy = report.copy() report.clear() report.update(self.previous_reports[taskid]) report.update(report_copy) def _first(self, report): if 'first' in", "`total`, `False` otherwise \"\"\" def __init__(self): self.previous_reports = { } self.volatile_fileds = ('first',", "reports Complement a progress report with the previous report for the same task.", "Complement a progress report with the previous report for the same task. Parameters", "first report for the task. If not given, automatically determined from `done`; `True`", "if 'first' in report: return report['first'] = (report['done'] == 0) def _last(self, report):", ": int, optional The number of the iterations done so far total :", "and 'name'. The `first` and `last` will be automatically determined if not given.", "iterations to be done name : str A name of the task. It", "automatically determined from `done`; `True` if `done` is 0, `False` otherwise last :", "bool `True` if the last report for the task. If not given, automatically", "`done` is 0, `False` otherwise last : bool `True` if the last report", "entries. The `taskid` must be always given. The first report for a task", "The unique task ID. done : int, optional The number of the iterations", "as the label on the progress bars. first : bool `True` if the", "given. 
The first report for a task must include `done`, `total`, and 'name'.", "# <NAME> <<EMAIL>> ##__________________________________________________________________|| class ProgressReportComplementer: \"\"\"Complement progress reports Complement a progress report", "report for the task. If not given, automatically determined from `done` and `total`;", "report['taskid'] if taskid in self.previous_reports: self._complement(taskid, report) self._first(report) self._last(report) self._store(taskid, report.copy()) def _complement(self,", "be done name : str A name of the task. It will be", "label on the progress bars. first : bool `True` if the first report", "report_copy = report.copy() report.clear() report.update(self.previous_reports[taskid]) report.update(report_copy) def _first(self, report): if 'first' in report:", "progress report with the previous report for the same task. Parameters ---------- report", "first : bool `True` if the first report for the task. If not", "{ } self.volatile_fileds = ('first', 'last') def __call__(self, report): taskid = report['taskid'] if", "always given. The first report for a task must include `done`, `total`, and", "progress bars. first : bool `True` if the first report for the task.", "is 0, `False` otherwise last : bool `True` if the last report for", "The total iterations to be done name : str A name of the", "str A name of the task. It will be use as the label", "report.clear() report.update(self.previous_reports[taskid]) report.update(report_copy) def _first(self, report): if 'first' in report: return report['first'] =", "automatically determined if not given. taskid : immutable The unique task ID. done", "so far total : int The total iterations to be done name :", "on the progress bars. first : bool `True` if the first report for", "automatically determined from `done` and `total`; `True` if `done` equals `total`, `False` otherwise", "with the following entries. The `taskid` must be always given. 
The first report", "for the same task. Parameters ---------- report : dict A progress report, a", "def _store(self, taskid, report): for k in self.volatile_fileds: report.pop(k, None) self.previous_reports[taskid] = report", "if not given. taskid : immutable The unique task ID. done : int,", "`total`, and 'name'. The `first` and `last` will be automatically determined if not", "number of the iterations done so far total : int The total iterations", "<NAME> <<EMAIL>> ##__________________________________________________________________|| class ProgressReportComplementer: \"\"\"Complement progress reports Complement a progress report with", "be always given. The first report for a task must include `done`, `total`,", "task. If not given, automatically determined from `done`; `True` if `done` is 0,", "report : dict A progress report, a dict with the following entries. The", "__call__(self, report): taskid = report['taskid'] if taskid in self.previous_reports: self._complement(taskid, report) self._first(report) self._last(report)", "self._complement(taskid, report) self._first(report) self._last(report) self._store(taskid, report.copy()) def _complement(self, taskid, report): report_copy = report.copy()", "report: return report['first'] = (report['done'] == 0) def _last(self, report): if 'last' in", "'name'. The `first` and `last` will be automatically determined if not given. taskid", "given. taskid : immutable The unique task ID. done : int, optional The", "0) def _last(self, report): if 'last' in report: return report['last'] = (report['done'] >=", "and `last` will be automatically determined if not given. taskid : immutable The", "with the previous report for the same task. Parameters ---------- report : dict", "name of the task. It will be use as the label on the", "same task. 
Parameters ---------- report : dict A progress report, a dict with", "equals `total`, `False` otherwise \"\"\" def __init__(self): self.previous_reports = { } self.volatile_fileds =", "ProgressReportComplementer: \"\"\"Complement progress reports Complement a progress report with the previous report for", "dict A progress report, a dict with the following entries. The `taskid` must", "for the task. If not given, automatically determined from `done` and `total`; `True`", "'first' in report: return report['first'] = (report['done'] == 0) def _last(self, report): if", "def _first(self, report): if 'first' in report: return report['first'] = (report['done'] == 0)", "report['last'] = (report['done'] >= report['total']) def _store(self, taskid, report): for k in self.volatile_fileds:", "of the iterations done so far total : int The total iterations to", "= report['taskid'] if taskid in self.previous_reports: self._complement(taskid, report) self._first(report) self._last(report) self._store(taskid, report.copy()) def", "from `done` and `total`; `True` if `done` equals `total`, `False` otherwise \"\"\" def", "and `total`; `True` if `done` equals `total`, `False` otherwise \"\"\" def __init__(self): self.previous_reports", "taskid : immutable The unique task ID. done : int, optional The number", "The `taskid` must be always given. The first report for a task must", ": str A name of the task. It will be use as the", "bars. first : bool `True` if the first report for the task. If", "The `first` and `last` will be automatically determined if not given. taskid :", "---------- report : dict A progress report, a dict with the following entries.", "\"\"\"Complement progress reports Complement a progress report with the previous report for the", "be use as the label on the progress bars. 
first : bool `True`", "in report: return report['first'] = (report['done'] == 0) def _last(self, report): if 'last'", "self.previous_reports = { } self.volatile_fileds = ('first', 'last') def __call__(self, report): taskid =", "_first(self, report): if 'first' in report: return report['first'] = (report['done'] == 0) def", "return report['first'] = (report['done'] == 0) def _last(self, report): if 'last' in report:", "<<EMAIL>> ##__________________________________________________________________|| class ProgressReportComplementer: \"\"\"Complement progress reports Complement a progress report with the", "_store(self, taskid, report): for k in self.volatile_fileds: report.pop(k, None) self.previous_reports[taskid] = report ##__________________________________________________________________||", "last report for the task. If not given, automatically determined from `done` and", "`False` otherwise \"\"\" def __init__(self): self.previous_reports = { } self.volatile_fileds = ('first', 'last')", "in report: return report['last'] = (report['done'] >= report['total']) def _store(self, taskid, report): for", "report with the previous report for the same task. Parameters ---------- report :", "The number of the iterations done so far total : int The total", "if `done` equals `total`, `False` otherwise \"\"\" def __init__(self): self.previous_reports = { }", "report for a task must include `done`, `total`, and 'name'. The `first` and", "report for the task. If not given, automatically determined from `done`; `True` if", "A progress report, a dict with the following entries. The `taskid` must be", "`total`; `True` if `done` equals `total`, `False` otherwise \"\"\" def __init__(self): self.previous_reports =", "task. It will be use as the label on the progress bars. first", "progress report, a dict with the following entries. The `taskid` must be always", "immutable The unique task ID. 
##__________________________________________________________________||
class ProgressReportComplementer:
    """Complement progress reports

    Complement a progress report with fields remembered from the previous
    report for the same task, and derive the `first`/`last` flags.

    Parameters
    ----------
    report : dict
        A progress report, a dict with the following entries. The `taskid`
        must be always given. The first report for a task must include
        `done`, `total`, and `name`. The `first` and `last` will be
        automatically determined if not given.

        taskid : immutable
            The unique task ID.
        done : int, optional
            The number of the iterations done so far
        total : int
            The total iterations to be done
        name : str
            A name of the task. It will be used as the label on the
            progress bars.
        first : bool
            `True` if the first report for the task. If not given,
            automatically determined from `done`; `True` if `done` is 0,
            `False` otherwise
        last : bool
            `True` if the last report for the task. If not given,
            automatically determined from `done` and `total`; `True` if
            `done` is at least `total` (`>=`, so overshooting `total`
            still counts as finished), `False` otherwise

    """

    def __init__(self):
        # Last stored report for each task, keyed by taskid.
        self.previous_reports = { }
        # Flags re-derived on every report, so never persisted.
        # NOTE(review): "fileds" is a typo for "fields"; the name is kept
        # because the attribute is part of the object's visible state.
        self.volatile_fileds = ('first', 'last')

    def __call__(self, report):
        """Complement `report` in place and remember it for the next call."""
        taskid = report['taskid']
        if taskid in self.previous_reports:
            self._complement(taskid, report)
        self._first(report)
        self._last(report)
        self._store(taskid, report.copy())

    def _complement(self, taskid, report):
        # Fill in keys missing from `report` using the previously stored
        # report, while letting the keys present in `report` win.
        incoming = report.copy()
        report.clear()
        report.update(self.previous_reports[taskid])
        report.update(incoming)

    def _first(self, report):
        # Derive `first` from `done` unless the caller supplied it.
        if 'first' in report:
            return
        report['first'] = report['done'] == 0

    def _last(self, report):
        # Derive `last` unless given. `>=` (not `==`) so a report that
        # overshoots `total` is still treated as the final one.
        if 'last' in report:
            return
        report['last'] = report['done'] >= report['total']

    def _store(self, taskid, report):
        # Strip per-call flags before storing so they are re-derived
        # from fresh `done`/`total` on the next report.
        for key in self.volatile_fileds:
            report.pop(key, None)
        self.previous_reports[taskid] = report
[ "found directory found in the packages file is higher than the output_base_dirpath. \"", "directory found in the packages file is higher than the output_base_dirpath. \" \"Please", "highest_found_directory self.output_base_dirpath = output_base_dirpath def __str__(self): return message_with_vars( message=\"The highest found directory found", "def __str__(self): return message_with_vars( message=\"The highest found directory found in the packages file", "file is higher than the output_base_dirpath. \" \"Please increase the output_base_dirpath to an", "= highest_found_directory self.output_base_dirpath = output_base_dirpath def __str__(self): return message_with_vars( message=\"The highest found directory", "return message_with_vars( message=\"The highest found directory found in the packages file is higher", "import message_with_vars class OutputDirpathTooLow(Exception): def __init__(self, highest_found_directory: str, output_base_dirpath: str): self.highest_found_directory = highest_found_directory", "__init__(self, highest_found_directory: str, output_base_dirpath: str): self.highest_found_directory = highest_found_directory self.output_base_dirpath = output_base_dirpath def __str__(self):", ".utils import message_with_vars class OutputDirpathTooLow(Exception): def __init__(self, highest_found_directory: str, output_base_dirpath: str): self.highest_found_directory =", "def __init__(self, highest_found_directory: str, output_base_dirpath: str): self.highest_found_directory = highest_found_directory self.output_base_dirpath = output_base_dirpath def", "OutputDirpathTooLow(Exception): def __init__(self, highest_found_directory: str, output_base_dirpath: str): self.highest_found_directory = highest_found_directory self.output_base_dirpath = output_base_dirpath", "the output_base_dirpath to an higher directory.\", vars_dict={ 'highest_found_directory': self.highest_found_directory, 'output_base_dirpath': self.output_base_dirpath } )", "found in the packages file is 
higher than the output_base_dirpath. \" \"Please increase", "str, output_base_dirpath: str): self.highest_found_directory = highest_found_directory self.output_base_dirpath = output_base_dirpath def __str__(self): return message_with_vars(", "than the output_base_dirpath. \" \"Please increase the output_base_dirpath to an higher directory.\", vars_dict={", "self.output_base_dirpath = output_base_dirpath def __str__(self): return message_with_vars( message=\"The highest found directory found in", "in the packages file is higher than the output_base_dirpath. \" \"Please increase the", "higher than the output_base_dirpath. \" \"Please increase the output_base_dirpath to an higher directory.\",", "\" \"Please increase the output_base_dirpath to an higher directory.\", vars_dict={ 'highest_found_directory': self.highest_found_directory, 'output_base_dirpath':", "self.highest_found_directory = highest_found_directory self.output_base_dirpath = output_base_dirpath def __str__(self): return message_with_vars( message=\"The highest found", "output_base_dirpath: str): self.highest_found_directory = highest_found_directory self.output_base_dirpath = output_base_dirpath def __str__(self): return message_with_vars( message=\"The", "<gh_stars>1-10 from .utils import message_with_vars class OutputDirpathTooLow(Exception): def __init__(self, highest_found_directory: str, output_base_dirpath: str):", "class OutputDirpathTooLow(Exception): def __init__(self, highest_found_directory: str, output_base_dirpath: str): self.highest_found_directory = highest_found_directory self.output_base_dirpath =", "output_base_dirpath. 
\" \"Please increase the output_base_dirpath to an higher directory.\", vars_dict={ 'highest_found_directory': self.highest_found_directory,", "highest_found_directory: str, output_base_dirpath: str): self.highest_found_directory = highest_found_directory self.output_base_dirpath = output_base_dirpath def __str__(self): return", "str): self.highest_found_directory = highest_found_directory self.output_base_dirpath = output_base_dirpath def __str__(self): return message_with_vars( message=\"The highest", "output_base_dirpath def __str__(self): return message_with_vars( message=\"The highest found directory found in the packages", "message_with_vars( message=\"The highest found directory found in the packages file is higher than", "packages file is higher than the output_base_dirpath. \" \"Please increase the output_base_dirpath to", "message=\"The highest found directory found in the packages file is higher than the", "the output_base_dirpath. \" \"Please increase the output_base_dirpath to an higher directory.\", vars_dict={ 'highest_found_directory':", "__str__(self): return message_with_vars( message=\"The highest found directory found in the packages file is", "increase the output_base_dirpath to an higher directory.\", vars_dict={ 'highest_found_directory': self.highest_found_directory, 'output_base_dirpath': self.output_base_dirpath }", "from .utils import message_with_vars class OutputDirpathTooLow(Exception): def __init__(self, highest_found_directory: str, output_base_dirpath: str): self.highest_found_directory", "highest found directory found in the packages file is higher than the output_base_dirpath.", "\"Please increase the output_base_dirpath to an higher directory.\", vars_dict={ 'highest_found_directory': self.highest_found_directory, 'output_base_dirpath': self.output_base_dirpath", "= output_base_dirpath def __str__(self): return message_with_vars( message=\"The highest found directory found in the", "message_with_vars class 
OutputDirpathTooLow(Exception): def __init__(self, highest_found_directory: str, output_base_dirpath: str): self.highest_found_directory = highest_found_directory self.output_base_dirpath", "is higher than the output_base_dirpath. \" \"Please increase the output_base_dirpath to an higher", "the packages file is higher than the output_base_dirpath. \" \"Please increase the output_base_dirpath" ]
[ "= 'Current Image' def __str__(self): return self.first_name + ' ' + self.last_name class", "django.db import models from django.utils.safestring import mark_safe from bookstore.accounts.models import BookstoreUser import cloudinary.models", "= models.CharField(choices=[(country, country) for country in list_of_countries], blank=True, max_length=44) city = models.CharField(max_length=200, blank=True)", "on_delete=models.CASCADE) user = models.ForeignKey(UserModel, on_delete=models.CASCADE) class AuthorReview(models.Model): text = models.TextField() date_posted = models.DateField(auto_now_add=True)", "max_length=30, blank=True) is_author = models.BooleanField(default=False) is_complete = models.BooleanField(default=False) user = models.OneToOneField(BookstoreUser, on_delete=models.CASCADE, primary_key=True,", "bookstore.accounts.models import BookstoreUser import cloudinary.models as cloudinary_models from bookstore.profiles.misc import list_of_countries from bookstore.profiles.validators", "def image_tag(self): return mark_safe(f'<img src=\"{self.image.url}\" width=\"150\" height=\"150\" />') image_tag.short_description = 'Current Image' def", "AuthorReview(models.Model): text = models.TextField() date_posted = models.DateField(auto_now_add=True) author = models.ForeignKey(Profile, on_delete=models.CASCADE) user =", "max_length=44) city = models.CharField(max_length=200, blank=True) street_address = models.CharField(max_length=200, blank=True) post_code = models.CharField(max_length=20, blank=True)", "from django.utils.safestring import mark_safe from bookstore.accounts.models import BookstoreUser import cloudinary.models as cloudinary_models from", "class AuthorDislike(models.Model): author = models.ForeignKey(Profile, on_delete=models.CASCADE) user = models.ForeignKey(UserModel, on_delete=models.CASCADE) class AuthorReview(models.Model): text", "cloudinary_models from bookstore.profiles.misc import list_of_countries from bookstore.profiles.validators import 
validate_city, validate_phone_number, validate_name UserModel =", "models.ForeignKey(Profile, on_delete=models.CASCADE) user = models.ForeignKey(UserModel, on_delete=models.CASCADE) class AuthorReview(models.Model): text = models.TextField() date_posted =", "import models from django.utils.safestring import mark_safe from bookstore.accounts.models import BookstoreUser import cloudinary.models as", "' ' + self.last_name class AuthorLike(models.Model): author = models.ForeignKey(Profile, on_delete=models.CASCADE) user = models.ForeignKey(UserModel,", "import mark_safe from bookstore.accounts.models import BookstoreUser import cloudinary.models as cloudinary_models from bookstore.profiles.misc import", "biography = models.TextField(blank=True) image = cloudinary_models.CloudinaryField(blank=True, resource_type='image') country = models.CharField(choices=[(country, country) for country", "<filename>bookstore/profiles/models.py from django.contrib.auth import get_user_model from django.db import models from django.utils.safestring import mark_safe", "from django.db import models from django.utils.safestring import mark_safe from bookstore.accounts.models import BookstoreUser import", "import get_user_model from django.db import models from django.utils.safestring import mark_safe from bookstore.accounts.models import", "country in list_of_countries], blank=True, max_length=44) city = models.CharField(max_length=200, blank=True) street_address = models.CharField(max_length=200, blank=True)", "= models.BooleanField(default=False) user = models.OneToOneField(BookstoreUser, on_delete=models.CASCADE, primary_key=True, blank=True) def image_tag(self): return mark_safe(f'<img src=\"{self.image.url}\"", "validate_name UserModel = get_user_model() class Profile(models.Model): first_name = models.CharField(max_length=200, blank=True) last_name = models.CharField(max_length=200,", "self.first_name + ' ' + self.last_name class AuthorLike(models.Model): author = 
models.ForeignKey(Profile, on_delete=models.CASCADE) user", "= models.CharField(max_length=200, blank=True) post_code = models.CharField(max_length=20, blank=True) phone = models.CharField(validators=[validate_phone_number], max_length=30, blank=True) is_author", "author = models.ForeignKey(Profile, on_delete=models.CASCADE) user = models.ForeignKey(UserModel, on_delete=models.CASCADE) class AuthorDislike(models.Model): author = models.ForeignKey(Profile,", "user = models.OneToOneField(BookstoreUser, on_delete=models.CASCADE, primary_key=True, blank=True) def image_tag(self): return mark_safe(f'<img src=\"{self.image.url}\" width=\"150\" height=\"150\"", "models.ForeignKey(UserModel, on_delete=models.CASCADE) class AuthorDislike(models.Model): author = models.ForeignKey(Profile, on_delete=models.CASCADE) user = models.ForeignKey(UserModel, on_delete=models.CASCADE) class", "resource_type='image') country = models.CharField(choices=[(country, country) for country in list_of_countries], blank=True, max_length=44) city =", "country) for country in list_of_countries], blank=True, max_length=44) city = models.CharField(max_length=200, blank=True) street_address =", "bookstore.profiles.validators import validate_city, validate_phone_number, validate_name UserModel = get_user_model() class Profile(models.Model): first_name = models.CharField(max_length=200,", "Image' def __str__(self): return self.first_name + ' ' + self.last_name class AuthorLike(models.Model): author", "models.CharField(choices=[(country, country) for country in list_of_countries], blank=True, max_length=44) city = models.CharField(max_length=200, blank=True) street_address", "models.CharField(max_length=20, blank=True) phone = models.CharField(validators=[validate_phone_number], max_length=30, blank=True) is_author = models.BooleanField(default=False) is_complete = models.BooleanField(default=False)", "= models.CharField(validators=[validate_phone_number], max_length=30, blank=True) is_author = 
models.BooleanField(default=False) is_complete = models.BooleanField(default=False) user = models.OneToOneField(BookstoreUser,", "height=\"150\" />') image_tag.short_description = 'Current Image' def __str__(self): return self.first_name + ' '", "= models.ForeignKey(Profile, on_delete=models.CASCADE) user = models.ForeignKey(UserModel, on_delete=models.CASCADE) class AuthorReview(models.Model): text = models.TextField() date_posted", "BookstoreUser import cloudinary.models as cloudinary_models from bookstore.profiles.misc import list_of_countries from bookstore.profiles.validators import validate_city,", "mark_safe from bookstore.accounts.models import BookstoreUser import cloudinary.models as cloudinary_models from bookstore.profiles.misc import list_of_countries", "cloudinary.models as cloudinary_models from bookstore.profiles.misc import list_of_countries from bookstore.profiles.validators import validate_city, validate_phone_number, validate_name", "on_delete=models.CASCADE) user = models.ForeignKey(UserModel, on_delete=models.CASCADE) class AuthorDislike(models.Model): author = models.ForeignKey(Profile, on_delete=models.CASCADE) user =", "class AuthorLike(models.Model): author = models.ForeignKey(Profile, on_delete=models.CASCADE) user = models.ForeignKey(UserModel, on_delete=models.CASCADE) class AuthorDislike(models.Model): author", "country = models.CharField(choices=[(country, country) for country in list_of_countries], blank=True, max_length=44) city = models.CharField(max_length=200,", "models.CharField(max_length=200, blank=True) last_name = models.CharField(max_length=200, blank=True) biography = models.TextField(blank=True) image = cloudinary_models.CloudinaryField(blank=True, resource_type='image')", "' + self.last_name class AuthorLike(models.Model): author = models.ForeignKey(Profile, on_delete=models.CASCADE) user = models.ForeignKey(UserModel, on_delete=models.CASCADE)", "AuthorDislike(models.Model): author = models.ForeignKey(Profile, 
on_delete=models.CASCADE) user = models.ForeignKey(UserModel, on_delete=models.CASCADE) class AuthorReview(models.Model): text =", "image = cloudinary_models.CloudinaryField(blank=True, resource_type='image') country = models.CharField(choices=[(country, country) for country in list_of_countries], blank=True,", "blank=True) phone = models.CharField(validators=[validate_phone_number], max_length=30, blank=True) is_author = models.BooleanField(default=False) is_complete = models.BooleanField(default=False) user", "import validate_city, validate_phone_number, validate_name UserModel = get_user_model() class Profile(models.Model): first_name = models.CharField(max_length=200, blank=True)", "mark_safe(f'<img src=\"{self.image.url}\" width=\"150\" height=\"150\" />') image_tag.short_description = 'Current Image' def __str__(self): return self.first_name", "/>') image_tag.short_description = 'Current Image' def __str__(self): return self.first_name + ' ' +", "blank=True) street_address = models.CharField(max_length=200, blank=True) post_code = models.CharField(max_length=20, blank=True) phone = models.CharField(validators=[validate_phone_number], max_length=30,", "django.contrib.auth import get_user_model from django.db import models from django.utils.safestring import mark_safe from bookstore.accounts.models", "width=\"150\" height=\"150\" />') image_tag.short_description = 'Current Image' def __str__(self): return self.first_name + '", "import cloudinary.models as cloudinary_models from bookstore.profiles.misc import list_of_countries from bookstore.profiles.validators import validate_city, validate_phone_number,", "models.BooleanField(default=False) user = models.OneToOneField(BookstoreUser, on_delete=models.CASCADE, primary_key=True, blank=True) def image_tag(self): return mark_safe(f'<img src=\"{self.image.url}\" width=\"150\"", "'Current Image' def __str__(self): return self.first_name + ' ' + self.last_name class AuthorLike(models.Model):", "text = 
models.TextField() date_posted = models.DateField(auto_now_add=True) author = models.ForeignKey(Profile, on_delete=models.CASCADE) user = models.ForeignKey(UserModel,", "for country in list_of_countries], blank=True, max_length=44) city = models.CharField(max_length=200, blank=True) street_address = models.CharField(max_length=200,", "from bookstore.accounts.models import BookstoreUser import cloudinary.models as cloudinary_models from bookstore.profiles.misc import list_of_countries from", "models.CharField(max_length=200, blank=True) biography = models.TextField(blank=True) image = cloudinary_models.CloudinaryField(blank=True, resource_type='image') country = models.CharField(choices=[(country, country)", "on_delete=models.CASCADE) class AuthorReview(models.Model): text = models.TextField() date_posted = models.DateField(auto_now_add=True) author = models.ForeignKey(Profile, on_delete=models.CASCADE)", "list_of_countries], blank=True, max_length=44) city = models.CharField(max_length=200, blank=True) street_address = models.CharField(max_length=200, blank=True) post_code =", "= models.BooleanField(default=False) is_complete = models.BooleanField(default=False) user = models.OneToOneField(BookstoreUser, on_delete=models.CASCADE, primary_key=True, blank=True) def image_tag(self):", "image_tag(self): return mark_safe(f'<img src=\"{self.image.url}\" width=\"150\" height=\"150\" />') image_tag.short_description = 'Current Image' def __str__(self):", "return self.first_name + ' ' + self.last_name class AuthorLike(models.Model): author = models.ForeignKey(Profile, on_delete=models.CASCADE)", "from django.contrib.auth import get_user_model from django.db import models from django.utils.safestring import mark_safe from", "UserModel = get_user_model() class Profile(models.Model): first_name = models.CharField(max_length=200, blank=True) last_name = models.CharField(max_length=200, blank=True)", "street_address = models.CharField(max_length=200, blank=True) post_code = 
models.CharField(max_length=20, blank=True) phone = models.CharField(validators=[validate_phone_number], max_length=30, blank=True)", "image_tag.short_description = 'Current Image' def __str__(self): return self.first_name + ' ' + self.last_name", "validate_city, validate_phone_number, validate_name UserModel = get_user_model() class Profile(models.Model): first_name = models.CharField(max_length=200, blank=True) last_name", "class AuthorReview(models.Model): text = models.TextField() date_posted = models.DateField(auto_now_add=True) author = models.ForeignKey(Profile, on_delete=models.CASCADE) user", "blank=True) def image_tag(self): return mark_safe(f'<img src=\"{self.image.url}\" width=\"150\" height=\"150\" />') image_tag.short_description = 'Current Image'", "import list_of_countries from bookstore.profiles.validators import validate_city, validate_phone_number, validate_name UserModel = get_user_model() class Profile(models.Model):", "import BookstoreUser import cloudinary.models as cloudinary_models from bookstore.profiles.misc import list_of_countries from bookstore.profiles.validators import", "= models.OneToOneField(BookstoreUser, on_delete=models.CASCADE, primary_key=True, blank=True) def image_tag(self): return mark_safe(f'<img src=\"{self.image.url}\" width=\"150\" height=\"150\" />')", "models.CharField(max_length=200, blank=True) street_address = models.CharField(max_length=200, blank=True) post_code = models.CharField(max_length=20, blank=True) phone = models.CharField(validators=[validate_phone_number],", "models.OneToOneField(BookstoreUser, on_delete=models.CASCADE, primary_key=True, blank=True) def image_tag(self): return mark_safe(f'<img src=\"{self.image.url}\" width=\"150\" height=\"150\" />') image_tag.short_description", "= cloudinary_models.CloudinaryField(blank=True, resource_type='image') country = models.CharField(choices=[(country, country) for country in list_of_countries], blank=True, max_length=44)", "def __str__(self): return 
self.first_name + ' ' + self.last_name class AuthorLike(models.Model): author =", "+ self.last_name class AuthorLike(models.Model): author = models.ForeignKey(Profile, on_delete=models.CASCADE) user = models.ForeignKey(UserModel, on_delete=models.CASCADE) class", "author = models.ForeignKey(Profile, on_delete=models.CASCADE) user = models.ForeignKey(UserModel, on_delete=models.CASCADE) class AuthorReview(models.Model): text = models.TextField()", "get_user_model() class Profile(models.Model): first_name = models.CharField(max_length=200, blank=True) last_name = models.CharField(max_length=200, blank=True) biography =", "= models.CharField(max_length=20, blank=True) phone = models.CharField(validators=[validate_phone_number], max_length=30, blank=True) is_author = models.BooleanField(default=False) is_complete =", "blank=True) is_author = models.BooleanField(default=False) is_complete = models.BooleanField(default=False) user = models.OneToOneField(BookstoreUser, on_delete=models.CASCADE, primary_key=True, blank=True)", "= models.ForeignKey(UserModel, on_delete=models.CASCADE) class AuthorReview(models.Model): text = models.TextField() date_posted = models.DateField(auto_now_add=True) author =", "models.ForeignKey(UserModel, on_delete=models.CASCADE) class AuthorReview(models.Model): text = models.TextField() date_posted = models.DateField(auto_now_add=True) author = models.ForeignKey(Profile,", "src=\"{self.image.url}\" width=\"150\" height=\"150\" />') image_tag.short_description = 'Current Image' def __str__(self): return self.first_name +", "first_name = models.CharField(max_length=200, blank=True) last_name = models.CharField(max_length=200, blank=True) biography = models.TextField(blank=True) image =", "= models.TextField() date_posted = models.DateField(auto_now_add=True) author = models.ForeignKey(Profile, on_delete=models.CASCADE) user = models.ForeignKey(UserModel, on_delete=models.CASCADE)", "last_name = models.CharField(max_length=200, blank=True) 
biography = models.TextField(blank=True) image = cloudinary_models.CloudinaryField(blank=True, resource_type='image') country =", "post_code = models.CharField(max_length=20, blank=True) phone = models.CharField(validators=[validate_phone_number], max_length=30, blank=True) is_author = models.BooleanField(default=False) is_complete", "is_author = models.BooleanField(default=False) is_complete = models.BooleanField(default=False) user = models.OneToOneField(BookstoreUser, on_delete=models.CASCADE, primary_key=True, blank=True) def", "primary_key=True, blank=True) def image_tag(self): return mark_safe(f'<img src=\"{self.image.url}\" width=\"150\" height=\"150\" />') image_tag.short_description = 'Current", "models.ForeignKey(Profile, on_delete=models.CASCADE) user = models.ForeignKey(UserModel, on_delete=models.CASCADE) class AuthorDislike(models.Model): author = models.ForeignKey(Profile, on_delete=models.CASCADE) user", "blank=True) post_code = models.CharField(max_length=20, blank=True) phone = models.CharField(validators=[validate_phone_number], max_length=30, blank=True) is_author = models.BooleanField(default=False)", "class Profile(models.Model): first_name = models.CharField(max_length=200, blank=True) last_name = models.CharField(max_length=200, blank=True) biography = models.TextField(blank=True)", "Profile(models.Model): first_name = models.CharField(max_length=200, blank=True) last_name = models.CharField(max_length=200, blank=True) biography = models.TextField(blank=True) image", "in list_of_countries], blank=True, max_length=44) city = models.CharField(max_length=200, blank=True) street_address = models.CharField(max_length=200, blank=True) post_code", "on_delete=models.CASCADE) class AuthorDislike(models.Model): author = models.ForeignKey(Profile, on_delete=models.CASCADE) user = models.ForeignKey(UserModel, on_delete=models.CASCADE) class AuthorReview(models.Model):", "= models.ForeignKey(Profile, on_delete=models.CASCADE) user = 
models.ForeignKey(UserModel, on_delete=models.CASCADE) class AuthorDislike(models.Model): author = models.ForeignKey(Profile, on_delete=models.CASCADE)", "list_of_countries from bookstore.profiles.validators import validate_city, validate_phone_number, validate_name UserModel = get_user_model() class Profile(models.Model): first_name", "is_complete = models.BooleanField(default=False) user = models.OneToOneField(BookstoreUser, on_delete=models.CASCADE, primary_key=True, blank=True) def image_tag(self): return mark_safe(f'<img", "user = models.ForeignKey(UserModel, on_delete=models.CASCADE) class AuthorDislike(models.Model): author = models.ForeignKey(Profile, on_delete=models.CASCADE) user = models.ForeignKey(UserModel,", "models.CharField(max_length=200, blank=True) post_code = models.CharField(max_length=20, blank=True) phone = models.CharField(validators=[validate_phone_number], max_length=30, blank=True) is_author =", "= models.CharField(max_length=200, blank=True) biography = models.TextField(blank=True) image = cloudinary_models.CloudinaryField(blank=True, resource_type='image') country = models.CharField(choices=[(country,", "models.CharField(validators=[validate_phone_number], max_length=30, blank=True) is_author = models.BooleanField(default=False) is_complete = models.BooleanField(default=False) user = models.OneToOneField(BookstoreUser, on_delete=models.CASCADE,", "self.last_name class AuthorLike(models.Model): author = models.ForeignKey(Profile, on_delete=models.CASCADE) user = models.ForeignKey(UserModel, on_delete=models.CASCADE) class AuthorDislike(models.Model):", "return mark_safe(f'<img src=\"{self.image.url}\" width=\"150\" height=\"150\" />') image_tag.short_description = 'Current Image' def __str__(self): return", "as cloudinary_models from bookstore.profiles.misc import list_of_countries from bookstore.profiles.validators import validate_city, validate_phone_number, validate_name UserModel", "= get_user_model() class Profile(models.Model): 
first_name = models.CharField(max_length=200, blank=True) last_name = models.CharField(max_length=200, blank=True) biography", "__str__(self): return self.first_name + ' ' + self.last_name class AuthorLike(models.Model): author = models.ForeignKey(Profile,", "get_user_model from django.db import models from django.utils.safestring import mark_safe from bookstore.accounts.models import BookstoreUser", "blank=True, max_length=44) city = models.CharField(max_length=200, blank=True) street_address = models.CharField(max_length=200, blank=True) post_code = models.CharField(max_length=20,", "from bookstore.profiles.validators import validate_city, validate_phone_number, validate_name UserModel = get_user_model() class Profile(models.Model): first_name =", "= models.CharField(max_length=200, blank=True) street_address = models.CharField(max_length=200, blank=True) post_code = models.CharField(max_length=20, blank=True) phone =", "models.BooleanField(default=False) is_complete = models.BooleanField(default=False) user = models.OneToOneField(BookstoreUser, on_delete=models.CASCADE, primary_key=True, blank=True) def image_tag(self): return", "cloudinary_models.CloudinaryField(blank=True, resource_type='image') country = models.CharField(choices=[(country, country) for country in list_of_countries], blank=True, max_length=44) city", "blank=True) last_name = models.CharField(max_length=200, blank=True) biography = models.TextField(blank=True) image = cloudinary_models.CloudinaryField(blank=True, resource_type='image') country", "= models.TextField(blank=True) image = cloudinary_models.CloudinaryField(blank=True, resource_type='image') country = models.CharField(choices=[(country, country) for country in", "= models.ForeignKey(UserModel, on_delete=models.CASCADE) class AuthorDislike(models.Model): author = models.ForeignKey(Profile, on_delete=models.CASCADE) user = models.ForeignKey(UserModel, on_delete=models.CASCADE)", "= models.CharField(max_length=200, blank=True) last_name 
= models.CharField(max_length=200, blank=True) biography = models.TextField(blank=True) image = cloudinary_models.CloudinaryField(blank=True,", "city = models.CharField(max_length=200, blank=True) street_address = models.CharField(max_length=200, blank=True) post_code = models.CharField(max_length=20, blank=True) phone", "models.TextField(blank=True) image = cloudinary_models.CloudinaryField(blank=True, resource_type='image') country = models.CharField(choices=[(country, country) for country in list_of_countries],", "blank=True) biography = models.TextField(blank=True) image = cloudinary_models.CloudinaryField(blank=True, resource_type='image') country = models.CharField(choices=[(country, country) for", "bookstore.profiles.misc import list_of_countries from bookstore.profiles.validators import validate_city, validate_phone_number, validate_name UserModel = get_user_model() class", "django.utils.safestring import mark_safe from bookstore.accounts.models import BookstoreUser import cloudinary.models as cloudinary_models from bookstore.profiles.misc", "from bookstore.profiles.misc import list_of_countries from bookstore.profiles.validators import validate_city, validate_phone_number, validate_name UserModel = get_user_model()", "validate_phone_number, validate_name UserModel = get_user_model() class Profile(models.Model): first_name = models.CharField(max_length=200, blank=True) last_name =", "models from django.utils.safestring import mark_safe from bookstore.accounts.models import BookstoreUser import cloudinary.models as cloudinary_models", "phone = models.CharField(validators=[validate_phone_number], max_length=30, blank=True) is_author = models.BooleanField(default=False) is_complete = models.BooleanField(default=False) user =", "AuthorLike(models.Model): author = models.ForeignKey(Profile, on_delete=models.CASCADE) user = models.ForeignKey(UserModel, on_delete=models.CASCADE) class AuthorDislike(models.Model): author =", "on_delete=models.CASCADE, 
primary_key=True, blank=True) def image_tag(self): return mark_safe(f'<img src=\"{self.image.url}\" width=\"150\" height=\"150\" />') image_tag.short_description =", "user = models.ForeignKey(UserModel, on_delete=models.CASCADE) class AuthorReview(models.Model): text = models.TextField() date_posted = models.DateField(auto_now_add=True) author", "+ ' ' + self.last_name class AuthorLike(models.Model): author = models.ForeignKey(Profile, on_delete=models.CASCADE) user =" ]
[ "models.BooleanField('Use custom image?', default=False) custom_image = models.ImageField('Custom image', upload_to='custom_images', blank=True) def __str__(self): return", "models.TextField('Nicknames', blank=True) use_custom_image = models.BooleanField('Use custom image?', default=False) custom_image = models.ImageField('Custom image', upload_to='custom_images',", "here. class ValveDataAbstract(models.Model): name = models.CharField('Name', max_length=50) localized_name = models.CharField('Localized name', max_length=50) nicknames", "models here. class ValveDataAbstract(models.Model): name = models.CharField('Name', max_length=50) localized_name = models.CharField('Localized name', max_length=50)", "image?', default=False) custom_image = models.ImageField('Custom image', upload_to='custom_images', blank=True) def __str__(self): return self.name class", "class ValveDataAbstract(models.Model): name = models.CharField('Name', max_length=50) localized_name = models.CharField('Localized name', max_length=50) nicknames =", "blank=True) use_custom_image = models.BooleanField('Use custom image?', default=False) custom_image = models.ImageField('Custom image', upload_to='custom_images', blank=True)", "your models here. class ValveDataAbstract(models.Model): name = models.CharField('Name', max_length=50) localized_name = models.CharField('Localized name',", "models.CharField('Localized name', max_length=50) nicknames = models.TextField('Nicknames', blank=True) use_custom_image = models.BooleanField('Use custom image?', default=False)", "ValveDataAbstract(models.Model): name = models.CharField('Name', max_length=50) localized_name = models.CharField('Localized name', max_length=50) nicknames = models.TextField('Nicknames',", "localized_name = models.CharField('Localized name', max_length=50) nicknames = models.TextField('Nicknames', blank=True) use_custom_image = models.BooleanField('Use custom", "django.db import models # Create your models here. 
class ValveDataAbstract(models.Model): name = models.CharField('Name',", "<filename>cheetah_core/djangoapps/core/models.py from django.db import models # Create your models here. class ValveDataAbstract(models.Model): name", "nicknames = models.TextField('Nicknames', blank=True) use_custom_image = models.BooleanField('Use custom image?', default=False) custom_image = models.ImageField('Custom", "from django.db import models # Create your models here. class ValveDataAbstract(models.Model): name =", "= models.TextField('Nicknames', blank=True) use_custom_image = models.BooleanField('Use custom image?', default=False) custom_image = models.ImageField('Custom image',", "= models.ImageField('Custom image', upload_to='custom_images', blank=True) def __str__(self): return self.name class Meta: abstract =", "name = models.CharField('Name', max_length=50) localized_name = models.CharField('Localized name', max_length=50) nicknames = models.TextField('Nicknames', blank=True)", "models.ImageField('Custom image', upload_to='custom_images', blank=True) def __str__(self): return self.name class Meta: abstract = True", "= models.BooleanField('Use custom image?', default=False) custom_image = models.ImageField('Custom image', upload_to='custom_images', blank=True) def __str__(self):", "= models.CharField('Name', max_length=50) localized_name = models.CharField('Localized name', max_length=50) nicknames = models.TextField('Nicknames', blank=True) use_custom_image", "models.CharField('Name', max_length=50) localized_name = models.CharField('Localized name', max_length=50) nicknames = models.TextField('Nicknames', blank=True) use_custom_image =", "models # Create your models here. class ValveDataAbstract(models.Model): name = models.CharField('Name', max_length=50) localized_name", "import models # Create your models here. 
class ValveDataAbstract(models.Model): name = models.CharField('Name', max_length=50)", "custom_image = models.ImageField('Custom image', upload_to='custom_images', blank=True) def __str__(self): return self.name class Meta: abstract", "# Create your models here. class ValveDataAbstract(models.Model): name = models.CharField('Name', max_length=50) localized_name =", "use_custom_image = models.BooleanField('Use custom image?', default=False) custom_image = models.ImageField('Custom image', upload_to='custom_images', blank=True) def", "default=False) custom_image = models.ImageField('Custom image', upload_to='custom_images', blank=True) def __str__(self): return self.name class Meta:", "max_length=50) localized_name = models.CharField('Localized name', max_length=50) nicknames = models.TextField('Nicknames', blank=True) use_custom_image = models.BooleanField('Use", "= models.CharField('Localized name', max_length=50) nicknames = models.TextField('Nicknames', blank=True) use_custom_image = models.BooleanField('Use custom image?',", "max_length=50) nicknames = models.TextField('Nicknames', blank=True) use_custom_image = models.BooleanField('Use custom image?', default=False) custom_image =", "name', max_length=50) nicknames = models.TextField('Nicknames', blank=True) use_custom_image = models.BooleanField('Use custom image?', default=False) custom_image", "Create your models here. class ValveDataAbstract(models.Model): name = models.CharField('Name', max_length=50) localized_name = models.CharField('Localized", "custom image?', default=False) custom_image = models.ImageField('Custom image', upload_to='custom_images', blank=True) def __str__(self): return self.name" ]
[ "= a return APP def cache(*args, **kwargs): return APP.cache(*args, **kwargs) def add(*args, **kwargs):", "add(*args, **kwargs): return APP.add(*args, **kwargs) def find(*args, **kwargs): return APP.world.find(*args, **kwargs) def find_one(*args,", "APP = None def qork_app(a=None): global APP if a is None: return APP", "**kwargs): return APP.world.find(*args, **kwargs) def find_one(*args, **kwargs): return APP.world.find(*args, one=True, **kwargs) def remove(*args,", "None: return APP APP = a return APP def cache(*args, **kwargs): return APP.cache(*args,", "**kwargs): return APP.add(*args, **kwargs) def find(*args, **kwargs): return APP.world.find(*args, **kwargs) def find_one(*args, **kwargs):", "remove(*args, **kwargs): return APP.remove(*args, **kwargs) def create(*args, **kwargs): return APP.create(*args, **kwargs) def clear():", "return APP.add(*args, **kwargs) def find(*args, **kwargs): return APP.world.find(*args, **kwargs) def find_one(*args, **kwargs): return", "a is None: return APP APP = a return APP def cache(*args, **kwargs):", "return APP.cache(*args, **kwargs) def add(*args, **kwargs): return APP.add(*args, **kwargs) def find(*args, **kwargs): return", "APP.scene.clear() def play(*args, **kwargs): return APP.play(*args, **kwargs) # def music(fn): # return APP.add(fn,", "= None def qork_app(a=None): global APP if a is None: return APP APP", "import Signal from qork.reactive import * APP = None def qork_app(a=None): global APP", "APP APP = a return APP def cache(*args, **kwargs): return APP.cache(*args, **kwargs) def", "**kwargs) def clear(): return APP.scene.clear() def play(*args, **kwargs): return APP.play(*args, **kwargs) # def", "**kwargs): return APP.world.find(*args, one=True, **kwargs) def remove(*args, **kwargs): return APP.remove(*args, **kwargs) def create(*args,", "**kwargs) def add(*args, **kwargs): return APP.add(*args, **kwargs) def find(*args, **kwargs): return APP.world.find(*args, **kwargs)", "return APP.scene.clear() def 
play(*args, **kwargs): return APP.play(*args, **kwargs) # def music(fn): # return", "def cache(*args, **kwargs): return APP.cache(*args, **kwargs) def add(*args, **kwargs): return APP.add(*args, **kwargs) def", "APP.world.find(*args, **kwargs) def find_one(*args, **kwargs): return APP.world.find(*args, one=True, **kwargs) def remove(*args, **kwargs): return", "APP.remove(*args, **kwargs) def create(*args, **kwargs): return APP.create(*args, **kwargs) def clear(): return APP.scene.clear() def", "def remove(*args, **kwargs): return APP.remove(*args, **kwargs) def create(*args, **kwargs): return APP.create(*args, **kwargs) def", "qork.signal import Signal from qork.reactive import * APP = None def qork_app(a=None): global", "def find_one(*args, **kwargs): return APP.world.find(*args, one=True, **kwargs) def remove(*args, **kwargs): return APP.remove(*args, **kwargs)", "APP.cache(*args, **kwargs) def add(*args, **kwargs): return APP.add(*args, **kwargs) def find(*args, **kwargs): return APP.world.find(*args,", "Signal from qork.reactive import * APP = None def qork_app(a=None): global APP if", "None def qork_app(a=None): global APP if a is None: return APP APP =", "a return APP def cache(*args, **kwargs): return APP.cache(*args, **kwargs) def add(*args, **kwargs): return", "from collections import defaultdict from qork.signal import Signal from qork.reactive import * APP", "APP.create(*args, **kwargs) def clear(): return APP.scene.clear() def play(*args, **kwargs): return APP.play(*args, **kwargs) #", "defaultdict from qork.signal import Signal from qork.reactive import * APP = None def", "return APP.remove(*args, **kwargs) def create(*args, **kwargs): return APP.create(*args, **kwargs) def clear(): return APP.scene.clear()", "**kwargs): return APP.create(*args, **kwargs) def clear(): return APP.scene.clear() def play(*args, **kwargs): return APP.play(*args,", "import defaultdict from qork.signal import Signal from qork.reactive import * APP = None", "def play(*args, 
**kwargs): return APP.play(*args, **kwargs) # def music(fn): # return APP.add(fn, loop=True)", "import * APP = None def qork_app(a=None): global APP if a is None:", "qork_app(a=None): global APP if a is None: return APP APP = a return", "APP def cache(*args, **kwargs): return APP.cache(*args, **kwargs) def add(*args, **kwargs): return APP.add(*args, **kwargs)", "return APP APP = a return APP def cache(*args, **kwargs): return APP.cache(*args, **kwargs)", "global APP if a is None: return APP APP = a return APP", "from qork.signal import Signal from qork.reactive import * APP = None def qork_app(a=None):", "APP.add(*args, **kwargs) def find(*args, **kwargs): return APP.world.find(*args, **kwargs) def find_one(*args, **kwargs): return APP.world.find(*args,", "def find(*args, **kwargs): return APP.world.find(*args, **kwargs) def find_one(*args, **kwargs): return APP.world.find(*args, one=True, **kwargs)", "create(*args, **kwargs): return APP.create(*args, **kwargs) def clear(): return APP.scene.clear() def play(*args, **kwargs): return", "return APP def cache(*args, **kwargs): return APP.cache(*args, **kwargs) def add(*args, **kwargs): return APP.add(*args,", "find(*args, **kwargs): return APP.world.find(*args, **kwargs) def find_one(*args, **kwargs): return APP.world.find(*args, one=True, **kwargs) def", "def qork_app(a=None): global APP if a is None: return APP APP = a", "return APP.world.find(*args, **kwargs) def find_one(*args, **kwargs): return APP.world.find(*args, one=True, **kwargs) def remove(*args, **kwargs):", "clear(): return APP.scene.clear() def play(*args, **kwargs): return APP.play(*args, **kwargs) # def music(fn): #", "def create(*args, **kwargs): return APP.create(*args, **kwargs) def clear(): return APP.scene.clear() def play(*args, **kwargs):", "**kwargs): return APP.remove(*args, **kwargs) def create(*args, **kwargs): return APP.create(*args, **kwargs) def clear(): return", "APP if a is None: return APP APP = a return APP def", "**kwargs) def 
remove(*args, **kwargs): return APP.remove(*args, **kwargs) def create(*args, **kwargs): return APP.create(*args, **kwargs)", "**kwargs) def find_one(*args, **kwargs): return APP.world.find(*args, one=True, **kwargs) def remove(*args, **kwargs): return APP.remove(*args,", "**kwargs) def create(*args, **kwargs): return APP.create(*args, **kwargs) def clear(): return APP.scene.clear() def play(*args,", "collections import defaultdict from qork.signal import Signal from qork.reactive import * APP =", "APP = a return APP def cache(*args, **kwargs): return APP.cache(*args, **kwargs) def add(*args,", "if a is None: return APP APP = a return APP def cache(*args,", "return APP.create(*args, **kwargs) def clear(): return APP.scene.clear() def play(*args, **kwargs): return APP.play(*args, **kwargs)", "qork.reactive import * APP = None def qork_app(a=None): global APP if a is", "* APP = None def qork_app(a=None): global APP if a is None: return", "find_one(*args, **kwargs): return APP.world.find(*args, one=True, **kwargs) def remove(*args, **kwargs): return APP.remove(*args, **kwargs) def", "#!/usr/bin/python from collections import defaultdict from qork.signal import Signal from qork.reactive import *", "is None: return APP APP = a return APP def cache(*args, **kwargs): return", "from qork.reactive import * APP = None def qork_app(a=None): global APP if a", "**kwargs): return APP.cache(*args, **kwargs) def add(*args, **kwargs): return APP.add(*args, **kwargs) def find(*args, **kwargs):", "return APP.world.find(*args, one=True, **kwargs) def remove(*args, **kwargs): return APP.remove(*args, **kwargs) def create(*args, **kwargs):", "APP.world.find(*args, one=True, **kwargs) def remove(*args, **kwargs): return APP.remove(*args, **kwargs) def create(*args, **kwargs): return", "one=True, **kwargs) def remove(*args, **kwargs): return APP.remove(*args, **kwargs) def create(*args, **kwargs): return APP.create(*args,", "def add(*args, **kwargs): return APP.add(*args, **kwargs) def 
find(*args, **kwargs): return APP.world.find(*args, **kwargs) def", "**kwargs) def find(*args, **kwargs): return APP.world.find(*args, **kwargs) def find_one(*args, **kwargs): return APP.world.find(*args, one=True,", "cache(*args, **kwargs): return APP.cache(*args, **kwargs) def add(*args, **kwargs): return APP.add(*args, **kwargs) def find(*args,", "def clear(): return APP.scene.clear() def play(*args, **kwargs): return APP.play(*args, **kwargs) # def music(fn):" ]
[ "def print_models(unprinted_designs, completed_models): while unprinted_designs: current_design = unprinted_designs.pop() print(\"Printing model: \" + current_design)", "print_models(unprinted_designs, completed_models): while unprinted_designs: current_design = unprinted_designs.pop() print(\"Printing model: \" + current_design) completed_models.append(current_design)" ]
[ "<reponame>smilelight/nymph # -*- coding: utf-8 -*- from typing import Dict, List def is_breakpoint(item:", "from typing import Dict, List def is_breakpoint(item: dict): # if item['text_feature'] != 'text':", "idx_list.append(i) if 0 not in idx_list: idx_list.insert(0, 0) if len(dataset) not in idx_list:", "return True if item['is_center'] is True: return True if item['is_bold'] is True: return", "# if item['text_feature'] != 'text': if item['text_feature'] not in ['table', 'text', 'image']: return", "return True if item['is_bold'] is True: return True return False def doc_split_fn(dataset: List[Dict]):", "idx_list: idx_list.insert(0, 0) if len(dataset) not in idx_list: idx_list.append(len(dataset)) return idx_list def doc_label_parse(labels:", "-*- from typing import Dict, List def is_breakpoint(item: dict): # if item['text_feature'] !=", "True: return True if item['is_bold'] is True: return True return False def doc_split_fn(dataset:", "True if item['is_bold'] is True: return True return False def doc_split_fn(dataset: List[Dict]): idx_list", "True if item['is_center'] is True: return True if item['is_bold'] is True: return True", "if item['is_bold'] is True: return True return False def doc_split_fn(dataset: List[Dict]): idx_list =", "'text', 'image']: return True if item['is_center'] is True: return True if item['is_bold'] is", "List[Dict]): idx_list = [] for i, item in enumerate(dataset): if is_breakpoint(item): idx_list.append(i) if", "idx_list = [] for i, item in enumerate(dataset): if is_breakpoint(item): idx_list.append(i) if 0", "if len(dataset) not in idx_list: idx_list.append(len(dataset)) return idx_list def doc_label_parse(labels: List[str]): return labels", "!= 'text': if item['text_feature'] not in ['table', 'text', 'image']: return True if item['is_center']", "= [] for i, item in enumerate(dataset): if is_breakpoint(item): idx_list.append(i) if 0 not", "item['text_feature'] not in ['table', 'text', 'image']: return True if item['is_center'] is 
True: return", "item['is_center'] is True: return True if item['is_bold'] is True: return True return False", "[] for i, item in enumerate(dataset): if is_breakpoint(item): idx_list.append(i) if 0 not in", "'text': if item['text_feature'] not in ['table', 'text', 'image']: return True if item['is_center'] is", "if item['text_feature'] != 'text': if item['text_feature'] not in ['table', 'text', 'image']: return True", "item['text_feature'] != 'text': if item['text_feature'] not in ['table', 'text', 'image']: return True if", "in enumerate(dataset): if is_breakpoint(item): idx_list.append(i) if 0 not in idx_list: idx_list.insert(0, 0) if", "import Dict, List def is_breakpoint(item: dict): # if item['text_feature'] != 'text': if item['text_feature']", "is True: return True if item['is_bold'] is True: return True return False def", "True return False def doc_split_fn(dataset: List[Dict]): idx_list = [] for i, item in", "def doc_split_fn(dataset: List[Dict]): idx_list = [] for i, item in enumerate(dataset): if is_breakpoint(item):", "# -*- coding: utf-8 -*- from typing import Dict, List def is_breakpoint(item: dict):", "def is_breakpoint(item: dict): # if item['text_feature'] != 'text': if item['text_feature'] not in ['table',", "is_breakpoint(item: dict): # if item['text_feature'] != 'text': if item['text_feature'] not in ['table', 'text',", "Dict, List def is_breakpoint(item: dict): # if item['text_feature'] != 'text': if item['text_feature'] not", "for i, item in enumerate(dataset): if is_breakpoint(item): idx_list.append(i) if 0 not in idx_list:", "'image']: return True if item['is_center'] is True: return True if item['is_bold'] is True:", "utf-8 -*- from typing import Dict, List def is_breakpoint(item: dict): # if item['text_feature']", "List def is_breakpoint(item: dict): # if item['text_feature'] != 'text': if item['text_feature'] not in", "dict): # if item['text_feature'] != 'text': if item['text_feature'] not in ['table', 'text', 'image']:", "if 
is_breakpoint(item): idx_list.append(i) if 0 not in idx_list: idx_list.insert(0, 0) if len(dataset) not", "enumerate(dataset): if is_breakpoint(item): idx_list.append(i) if 0 not in idx_list: idx_list.insert(0, 0) if len(dataset)", "is True: return True return False def doc_split_fn(dataset: List[Dict]): idx_list = [] for", "doc_split_fn(dataset: List[Dict]): idx_list = [] for i, item in enumerate(dataset): if is_breakpoint(item): idx_list.append(i)", "in ['table', 'text', 'image']: return True if item['is_center'] is True: return True if", "in idx_list: idx_list.insert(0, 0) if len(dataset) not in idx_list: idx_list.append(len(dataset)) return idx_list def", "is_breakpoint(item): idx_list.append(i) if 0 not in idx_list: idx_list.insert(0, 0) if len(dataset) not in", "item['is_bold'] is True: return True return False def doc_split_fn(dataset: List[Dict]): idx_list = []", "return True return False def doc_split_fn(dataset: List[Dict]): idx_list = [] for i, item", "not in idx_list: idx_list.insert(0, 0) if len(dataset) not in idx_list: idx_list.append(len(dataset)) return idx_list", "0 not in idx_list: idx_list.insert(0, 0) if len(dataset) not in idx_list: idx_list.append(len(dataset)) return", "typing import Dict, List def is_breakpoint(item: dict): # if item['text_feature'] != 'text': if", "if item['text_feature'] not in ['table', 'text', 'image']: return True if item['is_center'] is True:", "if item['is_center'] is True: return True if item['is_bold'] is True: return True return", "0) if len(dataset) not in idx_list: idx_list.append(len(dataset)) return idx_list def doc_label_parse(labels: List[str]): return", "-*- coding: utf-8 -*- from typing import Dict, List def is_breakpoint(item: dict): #", "False def doc_split_fn(dataset: List[Dict]): idx_list = [] for i, item in enumerate(dataset): if", "not in ['table', 'text', 'image']: return True if item['is_center'] is True: return True", "['table', 'text', 'image']: return True if item['is_center'] is True: return 
True if item['is_bold']", "True: return True return False def doc_split_fn(dataset: List[Dict]): idx_list = [] for i,", "i, item in enumerate(dataset): if is_breakpoint(item): idx_list.append(i) if 0 not in idx_list: idx_list.insert(0,", "item in enumerate(dataset): if is_breakpoint(item): idx_list.append(i) if 0 not in idx_list: idx_list.insert(0, 0)", "if 0 not in idx_list: idx_list.insert(0, 0) if len(dataset) not in idx_list: idx_list.append(len(dataset))", "idx_list.insert(0, 0) if len(dataset) not in idx_list: idx_list.append(len(dataset)) return idx_list def doc_label_parse(labels: List[str]):", "return False def doc_split_fn(dataset: List[Dict]): idx_list = [] for i, item in enumerate(dataset):", "coding: utf-8 -*- from typing import Dict, List def is_breakpoint(item: dict): # if" ]
[ "1 if n > 1: for i in range(2, n+1): fibonaccinumber3 = fibonaccinumber1", "range(2, n+1): fibonaccinumber3 = fibonaccinumber1 + fibonaccinumber2 fibonaccinumber1 = fibonaccinumber2 fibonaccinumber2 = fibonaccinumber3", "fibonaccinumber1 = fibonaccinumber2 fibonaccinumber2 = fibonaccinumber3 return fibonaccinumber2 t0 = time.perf_counter() n =", "the fibonacci algorithm without using recursion. # Can you find bigger terms of", "1: for i in range(2, n+1): fibonaccinumber3 = fibonaccinumber1 + fibonaccinumber2 fibonaccinumber1 =", "fibonaccinumber2 fibonaccinumber2 = fibonaccinumber3 return fibonaccinumber2 t0 = time.perf_counter() n = 9 result", "# Can you find bigger terms of the sequence? # Can you find", "of the sequence? # Can you find fib(200)? import time def fibonacci(n): fibonaccinumber1", "= 1 if n > 1: for i in range(2, n+1): fibonaccinumber3 =", "in range(2, n+1): fibonaccinumber3 = fibonaccinumber1 + fibonaccinumber2 fibonaccinumber1 = fibonaccinumber2 fibonaccinumber2 =", "# Rewrite the fibonacci algorithm without using recursion. # Can you find bigger", "the sequence? # Can you find fib(200)? 
import time def fibonacci(n): fibonaccinumber1 =", "fibonacci(n) t1 = time.perf_counter() print(\"fibonacci({0}) = {1}, ({2:.2f} secs)\".format(n, result, t1 - t0))", "fibonaccinumber2 = fibonaccinumber3 return fibonaccinumber2 t0 = time.perf_counter() n = 9 result =", "= fibonaccinumber3 return fibonaccinumber2 t0 = time.perf_counter() n = 9 result = fibonacci(n)", "= fibonaccinumber1 + fibonaccinumber2 fibonaccinumber1 = fibonaccinumber2 fibonaccinumber2 = fibonaccinumber3 return fibonaccinumber2 t0", "for i in range(2, n+1): fibonaccinumber3 = fibonaccinumber1 + fibonaccinumber2 fibonaccinumber1 = fibonaccinumber2", "n+1): fibonaccinumber3 = fibonaccinumber1 + fibonaccinumber2 fibonaccinumber1 = fibonaccinumber2 fibonaccinumber2 = fibonaccinumber3 return", "fibonacci(n): fibonaccinumber1 = 0 fibonaccinumber2 = 1 if n > 1: for i", "fibonaccinumber3 = fibonaccinumber1 + fibonaccinumber2 fibonaccinumber1 = fibonaccinumber2 fibonaccinumber2 = fibonaccinumber3 return fibonaccinumber2", "fibonaccinumber2 fibonaccinumber1 = fibonaccinumber2 fibonaccinumber2 = fibonaccinumber3 return fibonaccinumber2 t0 = time.perf_counter() n", "fibonaccinumber2 t0 = time.perf_counter() n = 9 result = fibonacci(n) t1 = time.perf_counter()", "sequence? # Can you find fib(200)? import time def fibonacci(n): fibonaccinumber1 = 0", "def fibonacci(n): fibonaccinumber1 = 0 fibonaccinumber2 = 1 if n > 1: for", "9 result = fibonacci(n) t1 = time.perf_counter() print(\"fibonacci({0}) = {1}, ({2:.2f} secs)\".format(n, result,", "bigger terms of the sequence? # Can you find fib(200)? import time def", "without using recursion. # Can you find bigger terms of the sequence? 
#", "n > 1: for i in range(2, n+1): fibonaccinumber3 = fibonaccinumber1 + fibonaccinumber2", "= fibonaccinumber2 fibonaccinumber2 = fibonaccinumber3 return fibonaccinumber2 t0 = time.perf_counter() n = 9", "fibonaccinumber3 return fibonaccinumber2 t0 = time.perf_counter() n = 9 result = fibonacci(n) t1", "= time.perf_counter() n = 9 result = fibonacci(n) t1 = time.perf_counter() print(\"fibonacci({0}) =", "result = fibonacci(n) t1 = time.perf_counter() print(\"fibonacci({0}) = {1}, ({2:.2f} secs)\".format(n, result, t1", "time def fibonacci(n): fibonaccinumber1 = 0 fibonaccinumber2 = 1 if n > 1:", "= fibonacci(n) t1 = time.perf_counter() print(\"fibonacci({0}) = {1}, ({2:.2f} secs)\".format(n, result, t1 -", "find fib(200)? import time def fibonacci(n): fibonaccinumber1 = 0 fibonaccinumber2 = 1 if", "+ fibonaccinumber2 fibonaccinumber1 = fibonaccinumber2 fibonaccinumber2 = fibonaccinumber3 return fibonaccinumber2 t0 = time.perf_counter()", "i in range(2, n+1): fibonaccinumber3 = fibonaccinumber1 + fibonaccinumber2 fibonaccinumber1 = fibonaccinumber2 fibonaccinumber2", "if n > 1: for i in range(2, n+1): fibonaccinumber3 = fibonaccinumber1 +", "t0 = time.perf_counter() n = 9 result = fibonacci(n) t1 = time.perf_counter() print(\"fibonacci({0})", "Can you find bigger terms of the sequence? # Can you find fib(200)?", "import time def fibonacci(n): fibonaccinumber1 = 0 fibonaccinumber2 = 1 if n >", "fibonacci algorithm without using recursion. # Can you find bigger terms of the", "recursion. # Can you find bigger terms of the sequence? # Can you", "return fibonaccinumber2 t0 = time.perf_counter() n = 9 result = fibonacci(n) t1 =", "terms of the sequence? # Can you find fib(200)? import time def fibonacci(n):", "<reponame>matthijskrul/ThinkPython # Rewrite the fibonacci algorithm without using recursion. # Can you find", "Can you find fib(200)? 
import time def fibonacci(n): fibonaccinumber1 = 0 fibonaccinumber2 =", "fibonaccinumber1 = 0 fibonaccinumber2 = 1 if n > 1: for i in", "> 1: for i in range(2, n+1): fibonaccinumber3 = fibonaccinumber1 + fibonaccinumber2 fibonaccinumber1", "n = 9 result = fibonacci(n) t1 = time.perf_counter() print(\"fibonacci({0}) = {1}, ({2:.2f}", "0 fibonaccinumber2 = 1 if n > 1: for i in range(2, n+1):", "Rewrite the fibonacci algorithm without using recursion. # Can you find bigger terms", "you find fib(200)? import time def fibonacci(n): fibonaccinumber1 = 0 fibonaccinumber2 = 1", "algorithm without using recursion. # Can you find bigger terms of the sequence?", "fib(200)? import time def fibonacci(n): fibonaccinumber1 = 0 fibonaccinumber2 = 1 if n", "you find bigger terms of the sequence? # Can you find fib(200)? import", "using recursion. # Can you find bigger terms of the sequence? # Can", "find bigger terms of the sequence? # Can you find fib(200)? import time", "# Can you find fib(200)? import time def fibonacci(n): fibonaccinumber1 = 0 fibonaccinumber2", "= 0 fibonaccinumber2 = 1 if n > 1: for i in range(2,", "fibonaccinumber2 = 1 if n > 1: for i in range(2, n+1): fibonaccinumber3", "= 9 result = fibonacci(n) t1 = time.perf_counter() print(\"fibonacci({0}) = {1}, ({2:.2f} secs)\".format(n,", "fibonaccinumber1 + fibonaccinumber2 fibonaccinumber1 = fibonaccinumber2 fibonaccinumber2 = fibonaccinumber3 return fibonaccinumber2 t0 =", "time.perf_counter() n = 9 result = fibonacci(n) t1 = time.perf_counter() print(\"fibonacci({0}) = {1}," ]
[]
[ "author_elem.find(part) if elem is not None: author_info[elem.tag] = elem.text authors.append(author_info) return pd.DataFrame(authors, columns=self.fields)", "article.iterfind( \"front/article-meta/contrib-group/contrib[@contrib-type='author']\" ): author_info = {\"pmcid\": pmcid} for part in [ \"name/surname\", \"name/given-names\",", "pd.DataFrame: authors = [] pmcid = _utils.get_pmcid(article) for author_elem in article.iterfind( \"front/article-meta/contrib-group/contrib[@contrib-type='author']\" ):", "etree from nqdc._typing import BaseExtractor from nqdc import _utils class AuthorsExtractor(BaseExtractor): \"\"\"Extracting list", "-> pd.DataFrame: authors = [] pmcid = _utils.get_pmcid(article) for author_elem in article.iterfind( \"front/article-meta/contrib-group/contrib[@contrib-type='author']\"", "= [] pmcid = _utils.get_pmcid(article) for author_elem in article.iterfind( \"front/article-meta/contrib-group/contrib[@contrib-type='author']\" ): author_info =", "\"\"\"Extracting list of authors from article XML.\"\"\" fields = (\"pmcid\", \"surname\", \"given-names\") name", "from lxml import etree from nqdc._typing import BaseExtractor from nqdc import _utils class", "article: etree.ElementTree) -> pd.DataFrame: authors = [] pmcid = _utils.get_pmcid(article) for author_elem in", "part in [ \"name/surname\", \"name/given-names\", ]: elem = author_elem.find(part) if elem is not", "= author_elem.find(part) if elem is not None: author_info[elem.tag] = elem.text authors.append(author_info) return pd.DataFrame(authors,", "from nqdc import _utils class AuthorsExtractor(BaseExtractor): \"\"\"Extracting list of authors from article XML.\"\"\"", "as pd from lxml import etree from nqdc._typing import BaseExtractor from nqdc import", "author_info = {\"pmcid\": pmcid} for part in [ \"name/surname\", \"name/given-names\", ]: elem =", "for part in [ \"name/surname\", \"name/given-names\", ]: elem = author_elem.find(part) if elem is", "): author_info = {\"pmcid\": pmcid} 
for part in [ \"name/surname\", \"name/given-names\", ]: elem", "= {\"pmcid\": pmcid} for part in [ \"name/surname\", \"name/given-names\", ]: elem = author_elem.find(part)", "]: elem = author_elem.find(part) if elem is not None: author_info[elem.tag] = elem.text authors.append(author_info)", "fields = (\"pmcid\", \"surname\", \"given-names\") name = \"authors\" def extract(self, article: etree.ElementTree) ->", "for author_elem in article.iterfind( \"front/article-meta/contrib-group/contrib[@contrib-type='author']\" ): author_info = {\"pmcid\": pmcid} for part in", "extract(self, article: etree.ElementTree) -> pd.DataFrame: authors = [] pmcid = _utils.get_pmcid(article) for author_elem", "import etree from nqdc._typing import BaseExtractor from nqdc import _utils class AuthorsExtractor(BaseExtractor): \"\"\"Extracting", "import _utils class AuthorsExtractor(BaseExtractor): \"\"\"Extracting list of authors from article XML.\"\"\" fields =", "from article XML.\"\"\" fields = (\"pmcid\", \"surname\", \"given-names\") name = \"authors\" def extract(self,", "= (\"pmcid\", \"surname\", \"given-names\") name = \"authors\" def extract(self, article: etree.ElementTree) -> pd.DataFrame:", "\"name/surname\", \"name/given-names\", ]: elem = author_elem.find(part) if elem is not None: author_info[elem.tag] =", "<reponame>neuroquery/nqdc<filename>src/nqdc/_authors.py \"\"\"Extracting list of authors from article XML.\"\"\" import pandas as pd from", "authors from article XML.\"\"\" import pandas as pd from lxml import etree from", "list of authors from article XML.\"\"\" import pandas as pd from lxml import", "\"name/given-names\", ]: elem = author_elem.find(part) if elem is not None: author_info[elem.tag] = elem.text", "of authors from article XML.\"\"\" import pandas as pd from lxml import etree", "pmcid} for part in [ \"name/surname\", \"name/given-names\", ]: elem = author_elem.find(part) if elem", "authors from article XML.\"\"\" fields = (\"pmcid\", \"surname\", 
\"given-names\") name = \"authors\" def", "= _utils.get_pmcid(article) for author_elem in article.iterfind( \"front/article-meta/contrib-group/contrib[@contrib-type='author']\" ): author_info = {\"pmcid\": pmcid} for", "AuthorsExtractor(BaseExtractor): \"\"\"Extracting list of authors from article XML.\"\"\" fields = (\"pmcid\", \"surname\", \"given-names\")", "article XML.\"\"\" import pandas as pd from lxml import etree from nqdc._typing import", "from article XML.\"\"\" import pandas as pd from lxml import etree from nqdc._typing", "\"\"\"Extracting list of authors from article XML.\"\"\" import pandas as pd from lxml", "def extract(self, article: etree.ElementTree) -> pd.DataFrame: authors = [] pmcid = _utils.get_pmcid(article) for", "authors = [] pmcid = _utils.get_pmcid(article) for author_elem in article.iterfind( \"front/article-meta/contrib-group/contrib[@contrib-type='author']\" ): author_info", "in [ \"name/surname\", \"name/given-names\", ]: elem = author_elem.find(part) if elem is not None:", "pandas as pd from lxml import etree from nqdc._typing import BaseExtractor from nqdc", "= \"authors\" def extract(self, article: etree.ElementTree) -> pd.DataFrame: authors = [] pmcid =", "import BaseExtractor from nqdc import _utils class AuthorsExtractor(BaseExtractor): \"\"\"Extracting list of authors from", "pmcid = _utils.get_pmcid(article) for author_elem in article.iterfind( \"front/article-meta/contrib-group/contrib[@contrib-type='author']\" ): author_info = {\"pmcid\": pmcid}", "pd from lxml import etree from nqdc._typing import BaseExtractor from nqdc import _utils", "{\"pmcid\": pmcid} for part in [ \"name/surname\", \"name/given-names\", ]: elem = author_elem.find(part) if", "author_elem in article.iterfind( \"front/article-meta/contrib-group/contrib[@contrib-type='author']\" ): author_info = {\"pmcid\": pmcid} for part in [", "lxml import etree from nqdc._typing import BaseExtractor from nqdc import _utils class AuthorsExtractor(BaseExtractor):", "[ 
\"name/surname\", \"name/given-names\", ]: elem = author_elem.find(part) if elem is not None: author_info[elem.tag]", "[] pmcid = _utils.get_pmcid(article) for author_elem in article.iterfind( \"front/article-meta/contrib-group/contrib[@contrib-type='author']\" ): author_info = {\"pmcid\":", "from nqdc._typing import BaseExtractor from nqdc import _utils class AuthorsExtractor(BaseExtractor): \"\"\"Extracting list of", "nqdc._typing import BaseExtractor from nqdc import _utils class AuthorsExtractor(BaseExtractor): \"\"\"Extracting list of authors", "BaseExtractor from nqdc import _utils class AuthorsExtractor(BaseExtractor): \"\"\"Extracting list of authors from article", "\"given-names\") name = \"authors\" def extract(self, article: etree.ElementTree) -> pd.DataFrame: authors = []", "\"front/article-meta/contrib-group/contrib[@contrib-type='author']\" ): author_info = {\"pmcid\": pmcid} for part in [ \"name/surname\", \"name/given-names\", ]:", "(\"pmcid\", \"surname\", \"given-names\") name = \"authors\" def extract(self, article: etree.ElementTree) -> pd.DataFrame: authors", "of authors from article XML.\"\"\" fields = (\"pmcid\", \"surname\", \"given-names\") name = \"authors\"", "name = \"authors\" def extract(self, article: etree.ElementTree) -> pd.DataFrame: authors = [] pmcid", "\"surname\", \"given-names\") name = \"authors\" def extract(self, article: etree.ElementTree) -> pd.DataFrame: authors =", "_utils.get_pmcid(article) for author_elem in article.iterfind( \"front/article-meta/contrib-group/contrib[@contrib-type='author']\" ): author_info = {\"pmcid\": pmcid} for part", "list of authors from article XML.\"\"\" fields = (\"pmcid\", \"surname\", \"given-names\") name =", "article XML.\"\"\" fields = (\"pmcid\", \"surname\", \"given-names\") name = \"authors\" def extract(self, article:", "nqdc import _utils class AuthorsExtractor(BaseExtractor): \"\"\"Extracting list of authors from article XML.\"\"\" fields", "XML.\"\"\" fields = (\"pmcid\", 
\"surname\", \"given-names\") name = \"authors\" def extract(self, article: etree.ElementTree)", "in article.iterfind( \"front/article-meta/contrib-group/contrib[@contrib-type='author']\" ): author_info = {\"pmcid\": pmcid} for part in [ \"name/surname\",", "\"authors\" def extract(self, article: etree.ElementTree) -> pd.DataFrame: authors = [] pmcid = _utils.get_pmcid(article)", "elem = author_elem.find(part) if elem is not None: author_info[elem.tag] = elem.text authors.append(author_info) return", "etree.ElementTree) -> pd.DataFrame: authors = [] pmcid = _utils.get_pmcid(article) for author_elem in article.iterfind(", "XML.\"\"\" import pandas as pd from lxml import etree from nqdc._typing import BaseExtractor", "_utils class AuthorsExtractor(BaseExtractor): \"\"\"Extracting list of authors from article XML.\"\"\" fields = (\"pmcid\",", "import pandas as pd from lxml import etree from nqdc._typing import BaseExtractor from", "class AuthorsExtractor(BaseExtractor): \"\"\"Extracting list of authors from article XML.\"\"\" fields = (\"pmcid\", \"surname\"," ]
[ "= self.userapp_token.get_last_searched_id(user_id) if user_id is not None: usr_search.move_user_to_favourite(get_id) self.userapp_token.update_last_searched(user_id, None) self._ask_to_move_msg(user_id) def _move_to_black(self,", "def _c_gender(self, u_id, gender) -> bool: if gender.isdigit() and int(gender) in range(1, 3):", "в избранное', color=VkKeyboardColor.POSITIVE) keyboard.add_line() keyboard.add_button('доб. в чс', color=VkKeyboardColor.NEGATIVE) self.vk_api.messages.send(peer_id=peer_id, message='✔️', keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def", "start(self): for event in self.long_poll.listen(): if event.type == VkBotEventType.MESSAGE_NEW: user_id = event.obj['message']['peer_id'] user_firstname", "- всё сложно\\n' '6 - в активном поиске\\n7 - влюблён/влюблена\\n8 - в гражданском", "city=usr.city, relation=usr.relation) def _generate_user(self, u_id, name, usr_search_list, usr_cook, search_engine): if usr_search_list.check_users_existence() is None:", "_next(self, user_id, user_token, user_firstname) -> None: usr_search = UserSearchList(user_id, session) v_usr_cook = VkUserCook(user_token)", "self.user_app.update(u_id, answer, 'dob') self._send_msg(u_id, 'я запомню вашу днюху ☺️') return True self._send_msg(u_id, 'Дата", "= f\"неизвестная команда '{txt_msg}' 😞\\nнапишите -> start\" keyboard = VkKeyboard(one_time=False) keyboard.add_button('start', color=VkKeyboardColor.SECONDARY) self.vk_api.messages.send(peer_id=peer_id,", "город' '\\nпр. \"/from 🇺🇦 Киев\" или \"/from 🇷🇺 Москва\" или \"/from BY Минск\"')", "self._resend(user_id, '/re 1-6') elif step == 1 and user_exist: if start: self._send_msg(user_id, 'приветствую,", "QUEUE = Queue() COMMANDS = {'start', 'начать', 'search', 'свайп вправо', 'доб. 
в избранное',", "-> девушка, \"/gender 2\" -> парень') return False if usr_info['relation'] is None: self._ask_relation_msg(u_id)", "return True self._send_msg(u_id, 'Семейное положение указан неверно') return False def _search_users(self, u_id, user_token):", "text_msg) else: user_exist = self.userapp_token.check_user(user_id) user_token = self.userapp_token.get_user_token(user_id) step = self.userapp_token.get_step(user_id) start =", "'а давай познакомимся 🐼': self._get_acquaintance(user_token) self._re_check(user_id, user_token) elif text_msg.split()[0] == '/dob': txt_c =", "def _move_to_black(self, user_id) -> None: usr_search = UserSearchList(user_id, session) get_id = self.userapp_token.get_last_searched_id(user_id) if", "избранное': self._move_to_fav(user_id) self._send_msg(user_id, 'пользователь добавлен в избраный список ⭐\\n' 'идет следующий поиск...️') self._next(user_id,", "keyboard = VkKeyboard(one_time=False) keyboard.add_openlink_button(label='sign up 📝', link=self.oauth_link) keyboard.add_button('start', color=VkKeyboardColor.POSITIVE) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id())", "color=VkKeyboardColor.NEGATIVE) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _send_bye(self, peer_id, usr_name) -> None: message =", "-> None: message = ('Ваше семейное положение? Отправьте \"/re\" и цифру от 1", "if event.type == VkBotEventType.MESSAGE_NEW: user_id = event.obj['message']['peer_id'] user_firstname = self._get_firstname(user_id) text_msg = event.obj['message']['text'].strip().lower()", "if usr_info['city'] is None: self._send_msg(u_id, 'Откуда вы? 
в формате => (RU,UA,BY,UZ) или (🇷🇺🇺🇦🇧🇾🇺🇿)", "= UserAppToken(session) self.user_app = UserApp(session) self.oauth_link = oauth_link self.u_vk_api = None def _get_firstname(self,", "успешно нашли подходящего пользователей 🐼') self.vk_api.messages.send(peer_id=u_id, message=f'[id{r_usr.vk_usr_id}|{r_usr.firstname} {r_usr.lastname}]', attachment=attach, random_id=get_random_id()) return r_usr.vk_usr_id class", "keyboard.add_button('а давай познакомимся 🐼', color=VkKeyboardColor.POSITIVE) keyboard.add_line() keyboard.add_button('ну...давай позже 😔', color=VkKeyboardColor.NEGATIVE) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(),", "положение? Отправьте \"/re\" и цифру от 1 - 8\\n\\n1 - не женат/не замужем\\n'", "len(text_msg.split()) == 2 if txt_c: self._c_dob(user_id, text_msg.split()[1]) self._re_check(user_id, user_token) else: self._resend(user_id, '/dob D.M.YYYY')", "права 🐼 после нажмите на зеленую кнопку \"start\" ' keyboard = VkKeyboard(one_time=False) keyboard.add_openlink_button(label='sign", "elif text_msg.split()[0] == '/gender': txt_c = len(text_msg.split()) == 2 if txt_c: self._c_gender(user_id, text_msg.split()[1])", "return self.vk_api.users.get(user_ids=user_id)[0]['first_name'] def _next(self, user_id, user_token, user_firstname) -> None: usr_search = UserSearchList(user_id, session)", "peer_id, usr_name) -> None: message = f'{usr_name}, <NAME> вызвать меня сможете написав ->", "== 'ну...давай позже 😔': self._send_bye(user_id, user_firstname) elif text_msg == 'а давай познакомимся 🐼':", "{value}' self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None, random_id=get_random_id()) def _send_msg_sign_up(self, peer_id, usr_name) -> None: message =", "user_id) -> None: usr_search = UserSearchList(user_id, session) get_id = self.userapp_token.get_last_searched_id(user_id) if user_id is", "and int(gender) in range(1, 3): self.user_app.update(u_id, int(gender), 'gender') return True 
self._send_msg(u_id, 'Неверный пол')", "True def _re_check(self, u_id, u_token): if self._check_new_usr_info(u_id): self.userapp_token.update_step(u_id, 1) t = threading.Thread(target=self._search_users, args=(u_id,", "if usr_info['dob'] is None: self._send_msg(u_id, 'Напишите дату рождения в формате: -> /dob D.M.YYYY", "range(1, 9): self.user_app.update(u_id, int(relation), 'relation') return True self._send_msg(u_id, 'Семейное положение указан неверно') return", "в чс', color=VkKeyboardColor.NEGATIVE) self.vk_api.messages.send(peer_id=peer_id, message='✔️', keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _get_acquaintance(self, u_token): user = VKinderUser(u_token).get_info()", "num[0], num[1], num[2] if d.isdigit() and m.isdigit() and y.isdigit(): if 1 <= int(d)", "user['relation'] = None self.user_app.add_user(vk_id=user['id'], firstname=user['firstname'], lastname=user['lastname'], dob=user['dob'], gender=user['gender'], city=user['city'], relation=user['relation']) return True def", "self.user_app.update(u_id, int(relation), 'relation') return True self._send_msg(u_id, 'Семейное положение указан неверно') return False def", "peer_id, message) -> None: self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None, random_id=get_random_id()) def _resend(self, peer_id, value: str):", "dob=user['dob'], gender=user['gender'], city=user['city'], relation=user['relation']) return True def _check_new_usr_info(self, u_id): usr = self.user_app.get_user(u_id) usr_info", "user_token) else: self._resend(user_id, '/re 1-6') elif step == 1 and user_exist: if start:", "добавлен в избраный список ⭐\\n' 'идет следующий поиск...️') self._next(user_id, user_token, user_firstname) elif text_msg", "снизу 🐼') self._ask_to_move_msg(user_id) elif next_: self._next(user_id, user_token, user_firstname) self._ask_to_move_msg(user_id) elif text_msg == 'доб.", "следующий поиск...️') self._next(user_id, user_token, user_firstname) elif text_msg == 'доб. 
в чс': self._move_to_black(user_id) self._send_msg(user_id,", "😔', color=VkKeyboardColor.NEGATIVE) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _send_bye(self, peer_id, usr_name) -> None: message", "< 2014: user['dob'] = None if user['gender'] == 0: user['gender'] = None if", "country, city) -> bool: vk = vk_api.VkApi(token=self.userapp_token.get_user_token(u_id)) self.u_vk_api = vk.get_api() country_flag = flag.dflagize(f\"{country.strip()}\",", "VKinderUser(u_token).get_info() if user['dob'] is None or len(user['dob'].split('.')) != 3 or not 1942 <=", "usr.dob.year, gender=usr.gender, city=usr.city, relation=usr.relation) def _generate_user(self, u_id, name, usr_search_list, usr_cook, search_engine): if usr_search_list.check_users_existence()", "u_id, name, usr_search_list, usr_cook, search_engine): if usr_search_list.check_users_existence() is None: self._send_msg(u_id, f'{name}, подходящих пользователей", "= SearchEngine(user_id, user_token) random_id = self._generate_user(user_id, user_firstname, usr_search, v_usr_cook, s_engine) get_id = self.userapp_token.get_last_searched_id(user_id)", "\"/from 🇺🇦 Киев\" или \"/from 🇷🇺 Москва\" или \"/from BY Минск\"') return False", "неверно') return False def _search_users(self, u_id, user_token): usr_search = UserSearchList(u_id, session) s_engine =", "and user_exist is False: self._send_msg_sign_up(user_id, user_firstname) elif step == 0 and user_exist: if", "session) v_usr_cook = VkUserCook(user_token) s_engine = SearchEngine(user_id, user_token) random_id = self._generate_user(user_id, user_firstname, usr_search,", "написав -> start или по кнопке из меню чата' keyboard = VkKeyboard(one_time=True) keyboard.add_button('start',", "def _ask_to_move_msg(self, peer_id) -> None: keyboard = VkKeyboard(one_time=True) keyboard.add_button('свайп вправо', color=VkKeyboardColor.SECONDARY) keyboard.add_line() keyboard.add_button('доб.", "import 
threading import vk_api import datetime as dt import flag from queue import", "не знакомы... давай познакомимся? 🐼\\n' \\ f'(нужно познакомится с пандой, чтобы перейти к", "user_id is not None: usr_search.move_user_to_favourite(get_id) self.userapp_token.update_last_searched(user_id, None) self._ask_to_move_msg(user_id) def _move_to_black(self, user_id) -> None:", "not VKGroupManage.QUEUE.empty(): VKGroupManage.QUEUE.get().start() return True return False def _c_dob(self, u_id, answer) -> bool:", "int(d) <= 31 and 1 <= int(m) <= 12 and 1942 <= int(y)", "== 0: user['relation'] = None self.user_app.add_user(vk_id=user['id'], firstname=user['firstname'], lastname=user['lastname'], dob=user['dob'], gender=user['gender'], city=user['city'], relation=user['relation']) return", "s_engine = SearchEngine(user_id, user_token) random_id = self._generate_user(user_id, user_firstname, usr_search, v_usr_cook, s_engine) get_id =", "- помолвлен/помолвлена\\n4 - женат/замужем\\n5 - всё сложно\\n' '6 - в активном поиске\\n7 -", "подходящего пользователей 🐼') self.vk_api.messages.send(peer_id=u_id, message=f'[id{r_usr.vk_usr_id}|{r_usr.firstname} {r_usr.lastname}]', attachment=attach, random_id=get_random_id()) return r_usr.vk_usr_id class VKLaunchGroup(VKGroupManage): def", "len(text_msg.split()) == 2 if txt_c: self._c_relation(user_id, text_msg.split()[1]) self._re_check(user_id, user_token) else: self._resend(user_id, '/re 1-6')", "message=message, keyboard=None, random_id=get_random_id()) def _resend(self, peer_id, value: str): message = f'Неверный формат, правильный", "keyboard.add_line() keyboard.add_button('доб. в избранное', color=VkKeyboardColor.POSITIVE) keyboard.add_line() keyboard.add_button('доб. в чс', color=VkKeyboardColor.NEGATIVE) self.vk_api.messages.send(peer_id=peer_id, message='✔️', keyboard=keyboard.get_keyboard(),", "D.M.YYYY (от 9 до 80 лет допускается)' '\\nпр. 
\"/dob 15.7.1990\" ') return False", "f'{name}, успешно нашли подходящего пользователей 🐼') self.vk_api.messages.send(peer_id=u_id, message=f'[id{r_usr.vk_usr_id}|{r_usr.firstname} {r_usr.lastname}]', attachment=attach, random_id=get_random_id()) return r_usr.vk_usr_id", "keyboard.add_line() keyboard.add_button('ну...давай позже 😔', color=VkKeyboardColor.NEGATIVE) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _send_bye(self, peer_id, usr_name)", "self.userapp_token.get_last_searched_id(user_id) if get_id is not None: usr_search.move_user_to_archive(get_id) self.userapp_token.update_last_searched(user_id, random_id) else: self.userapp_token.update_last_searched(user_id, random_id) self._ask_to_move_msg(user_id)", "команда '{txt_msg}' 😞\\nнапишите -> start\" keyboard = VkKeyboard(one_time=False) keyboard.add_button('start', color=VkKeyboardColor.SECONDARY) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(),", "\"/gender 2\" -> парень') return False if usr_info['relation'] is None: self._ask_relation_msg(u_id) return False", "_re_check(self, u_id, u_token): if self._check_new_usr_info(u_id): self.userapp_token.update_step(u_id, 1) t = threading.Thread(target=self._search_users, args=(u_id, u_token), daemon=True)", "keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _send_bye(self, peer_id, usr_name) -> None: message = f'{usr_name}, <NAME> вызвать", "None: usr_search.move_user_to_black(get_id) self.userapp_token.update_last_searched(user_id, None) self._ask_to_move_msg(user_id) # Messaging def _send_msg(self, peer_id, message) -> None:", "кнопке снизу \"sign up 📝\" и ' \\ f'выдайте необходимые права 🐼 после", "сложно\\n' '6 - в активном поиске\\n7 - влюблён/влюблена\\n8 - в гражданском браке\\n\\nпр. \"/re", "or not 1942 <= int(user['dob'].split('.')[2]) < 2014: user['dob'] = None if user['gender'] ==", "80 лет допускается)' '\\nпр. 
\"/dob 15.7.1990\" ') return False if usr_info['city'] is None:", "'city') self._send_msg(u_id, f'{country} {city} ☺️') return True self._send_msg(u_id, 'Страна/город указан неверено') return False", "usr = self.user_app.get_user(u_id) s_engine.search_users_n_add_to_db(age=dt.datetime.now().year - usr.dob.year, gender=usr.gender, city=usr.city, relation=usr.relation) def _generate_user(self, u_id, name,", "if len(attach) != 3: usr_search_list.move_user_to_archive(r_usr.vk_usr_id) return self._generate_user(u_id, name, usr_search_list, usr_cook, search_engine) self._send_msg(u_id, f'{name},", "1 <= int(d) <= 31 and 1 <= int(m) <= 12 and 1942", "self._re_check(user_id, user_token) else: self._resend(user_id, '/dob D.M.YYYY') elif text_msg.split()[0] == '/from': txt_c = len(text_msg.split())", "keyboard.add_button('start', color=VkKeyboardColor.SECONDARY) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _unknown_command(self, peer_id, txt_msg) -> None: message", "s_engine.search_users_n_add_to_db(age=dt.datetime.now().year - usr.dob.year, gender=usr.gender, city=usr.city, relation=usr.relation) def _generate_user(self, u_id, name, usr_search_list, usr_cook, search_engine):", "if start: self._send_msg_signed_in(user_id, user_firstname) elif text_msg == 'ну...давай позже 😔': self._send_bye(user_id, user_firstname) elif", "UserSearchList(user_id, session) get_id = self.userapp_token.get_last_searched_id(user_id) if user_id is not None: usr_search.move_user_to_favourite(get_id) self.userapp_token.update_last_searched(user_id, None)", "- 8\\n\\n1 - не женат/не замужем\\n' '2 - есть друг/есть подруга\\n3 - помолвлен/помолвлена\\n4", "vk_api.keyboard import VkKeyboard, VkKeyboardColor from vk_api.utils import get_random_id from src.vk_chat_bot.db.database import UserAppToken, UserSearchList,", "\"/re 6\"') self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None, 
random_id=get_random_id()) def _ask_to_move_msg(self, peer_id) -> None: keyboard =", "<= 31 and 1 <= int(m) <= 12 and 1942 <= int(y) <=", "text_msg in {'start', 'начать'} next_ = text_msg in {'next', 'свайп вправо'} if start", "'доб. в избранное': self._move_to_fav(user_id) self._send_msg(user_id, 'пользователь добавлен в избраный список ⭐\\n' 'идет следующий", "get_random_id from src.vk_chat_bot.db.database import UserAppToken, UserSearchList, UserApp, session from src.vk_chat_bot.vk.vkontakte import SearchEngine, VKinderUser,", "UserApp, session from src.vk_chat_bot.vk.vkontakte import SearchEngine, VKinderUser, VkUserCook class VKGroupManage: QUEUE = Queue()", "пр. \"/gender 1\" -> девушка, \"/gender 2\" -> парень') return False if usr_info['relation']", "event.obj['message']['text'].strip().lower() print(f\"New msg from {user_id}, text: {text_msg} \") if text_msg not in VKLaunchGroup.COMMANDS", "u_token): if self._check_new_usr_info(u_id): self.userapp_token.update_step(u_id, 1) t = threading.Thread(target=self._search_users, args=(u_id, u_token), daemon=True) VKGroupManage.QUEUE.put(t) while", "=> (RU,UA,BY,UZ) или (🇷🇺🇺🇦🇧🇾🇺🇿) и напишите город' '\\nпр. \"/from 🇺🇦 Киев\" или \"/from", "country = self.u_vk_api.database.getCountries(code=country_flag) country_id = country['items'][0]['id'] if country_id != 0: ci = self.u_vk_api.database.getCities(country_id=country_id,", "в чс', 'ну...давай позже 😔', 'а давай познакомимся 🐼'} def __init__(self, vk_group_token, group_id,", "<= int(m) <= 12 and 1942 <= int(y) <= 2013: self.user_app.update(u_id, answer, 'dob')", "self._send_msg(u_id, 'я запомню вашу днюху ☺️') return True self._send_msg(u_id, 'Дата указана неверено') return", "vk_api.utils import get_random_id from src.vk_chat_bot.db.database import UserAppToken, UserSearchList, UserApp, session from src.vk_chat_bot.vk.vkontakte import", "или (🇷🇺🇺🇦🇧🇾🇺🇿) и напишите город' '\\nпр. 
\"/from 🇺🇦 Киев\" или \"/from 🇷🇺 Москва\"", "txt_c = len(text_msg.split()) == 3 if txt_c: self._c_city(user_id, text_msg.split()[1], text_msg.split()[2]) self._re_check(user_id, user_token) elif", "u_token): user = VKinderUser(u_token).get_info() if user['dob'] is None or len(user['dob'].split('.')) != 3 or", "\"start\" ' keyboard = VkKeyboard(one_time=False) keyboard.add_openlink_button(label='sign up 📝', link=self.oauth_link) keyboard.add_button('start', color=VkKeyboardColor.POSITIVE) self.vk_api.messages.send(peer_id=peer_id, message=message,", "def _send_msg(self, peer_id, message) -> None: self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None, random_id=get_random_id()) def _resend(self, peer_id,", "usr.dob, 'city': usr.city, 'gender': usr.gender, 'relation': usr.relation} if usr_info['dob'] is None: self._send_msg(u_id, 'Напишите", "is None: self._send_msg(u_id, 'Напишите дату рождения в формате: -> /dob D.M.YYYY (от 9", "in VKLaunchGroup.COMMANDS and \\ text_msg.split()[0] not in {'/dob', '/from', '/gender', '/re'}: self._unknown_command(user_id, text_msg)", "2013: self.user_app.update(u_id, answer, 'dob') self._send_msg(u_id, 'я запомню вашу днюху ☺️') return True self._send_msg(u_id,", "3 or not 1942 <= int(user['dob'].split('.')[2]) < 2014: user['dob'] = None if user['gender']", "color=VkKeyboardColor.POSITIVE) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _send_msg_signed_in(self, peer_id, firstname) -> None: message =", "= answer.split('.') if len(num) == 3: d, m, y = num[0], num[1], num[2]", "ботом перейдите по кнопке снизу \"sign up 📝\" и ' \\ f'выдайте необходимые", "True self._send_msg(u_id, 'Дата указана неверено') return False def _c_city(self, u_id, country, city) ->", "позже 😔': self._send_bye(user_id, user_firstname) elif text_msg == 'а давай познакомимся 🐼': self._get_acquaintance(user_token) self._re_check(user_id,", "<= int(d) <= 31 and 1 <= 
int(m) <= 12 and 1942 <=", "usr_info['gender'] is None: self._send_msg(u_id, 'Ваш пол?\\n пр. \"/gender 1\" -> девушка, \"/gender 2\"", "'/dob': txt_c = len(text_msg.split()) == 2 if txt_c: self._c_dob(user_id, text_msg.split()[1]) self._re_check(user_id, user_token) else:", "цифру от 1 - 8\\n\\n1 - не женат/не замужем\\n' '2 - есть друг/есть", "вернитесь чуть позже😓') return None r_usr = usr_search_list.select_random_row() attach = usr_cook.get_user_photos(r_usr.vk_usr_id) if len(attach)", "self._c_gender(user_id, text_msg.split()[1]) self._re_check(user_id, user_token) else: self._resend(user_id, '/gender 1 или /gender 2') elif text_msg.split()[0]", "country_id != 0: ci = self.u_vk_api.database.getCities(country_id=country_id, q=city, count=1)['items'] self.user_app.update(u_id, ci[0]['id'], 'city') self._send_msg(u_id, f'{country}", "2') elif text_msg.split()[0] == '/re': txt_c = len(text_msg.split()) == 2 if txt_c: self._c_relation(user_id,", "'/gender': txt_c = len(text_msg.split()) == 2 if txt_c: self._c_gender(user_id, text_msg.split()[1]) self._re_check(user_id, user_token) else:", "True self._send_msg(u_id, 'Семейное положение указан неверно') return False def _search_users(self, u_id, user_token): usr_search", "VkKeyboard(one_time=True) keyboard.add_button('а давай познакомимся 🐼', color=VkKeyboardColor.POSITIVE) keyboard.add_line() keyboard.add_button('ну...давай позже 😔', color=VkKeyboardColor.NEGATIVE) self.vk_api.messages.send(peer_id=peer_id, message=message,", "из меню чата' keyboard = VkKeyboard(one_time=True) keyboard.add_button('start', color=VkKeyboardColor.SECONDARY) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def", "user_exist: if start: self._send_msg_signed_in(user_id, user_firstname) elif text_msg == 'ну...давай позже 😔': self._send_bye(user_id, user_firstname)", "добавлен в черный список 🌚\\n' 'идет следующий поиск...') self._next(user_id, user_token, 
user_firstname) if __name__", "2 if txt_c: self._c_gender(user_id, text_msg.split()[1]) self._re_check(user_id, user_token) else: self._resend(user_id, '/gender 1 или /gender", "user_token) random_id = self._generate_user(user_id, user_firstname, usr_search, v_usr_cook, s_engine) get_id = self.userapp_token.get_last_searched_id(user_id) if get_id", "пол?\\n пр. \"/gender 1\" -> девушка, \"/gender 2\" -> парень') return False if", "'свайп вправо'} if start and user_exist is False: self._send_msg_sign_up(user_id, user_firstname) elif step ==", "- не женат/не замужем\\n' '2 - есть друг/есть подруга\\n3 - помолвлен/помолвлена\\n4 - женат/замужем\\n5", "чс': self._move_to_black(user_id) self._send_msg(user_id, 'пользователь добавлен в черный список 🌚\\n' 'идет следующий поиск...') self._next(user_id,", "self._send_msg(user_id, 'приветствую, нажмите на кнопки снизу 🐼') self._ask_to_move_msg(user_id) elif next_: self._next(user_id, user_token, user_firstname)", "= UserSearchList(user_id, session) v_usr_cook = VkUserCook(user_token) s_engine = SearchEngine(user_id, user_token) random_id = self._generate_user(user_id,", "необходимые права 🐼 после нажмите на зеленую кнопку \"start\" ' keyboard = VkKeyboard(one_time=False)", "session) get_id = self.userapp_token.get_last_searched_id(user_id) if user_id is not None: usr_search.move_user_to_favourite(get_id) self.userapp_token.update_last_searched(user_id, None) self._ask_to_move_msg(user_id)", "_unknown_command(self, peer_id, txt_msg) -> None: message = f\"неизвестная команда '{txt_msg}' 😞\\nнапишите -> start\"", "None: usr = self.user_app.get_user(u_id) s_engine.search_users_n_add_to_db(age=dt.datetime.now().year - usr.dob.year, gender=usr.gender, city=usr.city, relation=usr.relation) def _generate_user(self, u_id,", "кнопке из меню чата' keyboard = VkKeyboard(one_time=True) keyboard.add_button('start', color=VkKeyboardColor.SECONDARY) self.vk_api.messages.send(peer_id=peer_id, message=message, 
keyboard=keyboard.get_keyboard(), random_id=get_random_id())", "session from src.vk_chat_bot.vk.vkontakte import SearchEngine, VKinderUser, VkUserCook class VKGroupManage: QUEUE = Queue() COMMANDS", "f'выдайте необходимые права 🐼 после нажмите на зеленую кнопку \"start\" ' keyboard =", "= text_msg in {'next', 'свайп вправо'} if start and user_exist is False: self._send_msg_sign_up(user_id,", "txt_msg) -> None: message = f\"неизвестная команда '{txt_msg}' 😞\\nнапишите -> start\" keyboard =", "and 1942 <= int(y) <= 2013: self.user_app.update(u_id, answer, 'dob') self._send_msg(u_id, 'я запомню вашу", "'Ваш пол?\\n пр. \"/gender 1\" -> девушка, \"/gender 2\" -> парень') return False", "False if usr_info['city'] is None: self._send_msg(u_id, 'Откуда вы? в формате => (RU,UA,BY,UZ) или", "пользователей 🐼') self.vk_api.messages.send(peer_id=u_id, message=f'[id{r_usr.vk_usr_id}|{r_usr.firstname} {r_usr.lastname}]', attachment=attach, random_id=get_random_id()) return r_usr.vk_usr_id class VKLaunchGroup(VKGroupManage): def start(self):", "1) t = threading.Thread(target=self._search_users, args=(u_id, u_token), daemon=True) VKGroupManage.QUEUE.put(t) while not VKGroupManage.QUEUE.empty(): VKGroupManage.QUEUE.get().start() return", "vk_group_token, group_id, oauth_link): self.vk = vk_api.VkApi(token=vk_group_token) self.long_poll = VkBotLongPoll(self.vk, group_id=group_id) self.vk_api = self.vk.get_api()", "Queue from vk_api.bot_longpoll import VkBotLongPoll, VkBotEventType from vk_api.keyboard import VkKeyboard, VkKeyboardColor from vk_api.utils", "= self.vk.get_api() self.userapp_token = UserAppToken(session) self.user_app = UserApp(session) self.oauth_link = oauth_link self.u_vk_api =", "message='✔️', keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _get_acquaintance(self, u_token): user = VKinderUser(u_token).get_info() if user['dob'] is None", "in {'/dob', '/from', '/gender', '/re'}: self._unknown_command(user_id, text_msg) else: user_exist = 
self.userapp_token.check_user(user_id) user_token =", "in {'next', 'свайп вправо'} if start and user_exist is False: self._send_msg_sign_up(user_id, user_firstname) elif", "return True def _check_new_usr_info(self, u_id): usr = self.user_app.get_user(u_id) usr_info = {'dob': usr.dob, 'city':", "BY Минск\"') return False if usr_info['gender'] is None: self._send_msg(u_id, 'Ваш пол?\\n пр. \"/gender", "txt_c: self._c_gender(user_id, text_msg.split()[1]) self._re_check(user_id, user_token) else: self._resend(user_id, '/gender 1 или /gender 2') elif", "args=(u_id, u_token), daemon=True) VKGroupManage.QUEUE.put(t) while not VKGroupManage.QUEUE.empty(): VKGroupManage.QUEUE.get().start() return True return False def", "познакомимся 🐼'} def __init__(self, vk_group_token, group_id, oauth_link): self.vk = vk_api.VkApi(token=vk_group_token) self.long_poll = VkBotLongPoll(self.vk,", "кнопку \"start\" ' keyboard = VkKeyboard(one_time=False) keyboard.add_openlink_button(label='sign up 📝', link=self.oauth_link) keyboard.add_button('start', color=VkKeyboardColor.POSITIVE) self.vk_api.messages.send(peer_id=peer_id,", "int(gender) in range(1, 3): self.user_app.update(u_id, int(gender), 'gender') return True self._send_msg(u_id, 'Неверный пол') return", "с ботом перейдите по кнопке снизу \"sign up 📝\" и ' \\ f'выдайте", "== '/gender': txt_c = len(text_msg.split()) == 2 if txt_c: self._c_gender(user_id, text_msg.split()[1]) self._re_check(user_id, user_token)", "= UserSearchList(user_id, session) get_id = self.userapp_token.get_last_searched_id(user_id) if user_id is not None: usr_search.move_user_to_black(get_id) self.userapp_token.update_last_searched(user_id,", "или /gender 2') elif text_msg.split()[0] == '/re': txt_c = len(text_msg.split()) == 2 if", "peer_id) -> None: keyboard = VkKeyboard(one_time=True) keyboard.add_button('свайп вправо', color=VkKeyboardColor.SECONDARY) keyboard.add_line() keyboard.add_button('доб. 
в избранное',", "в формате: -> /dob D.M.YYYY (от 9 до 80 лет допускается)' '\\nпр. \"/dob", "m.isdigit() and y.isdigit(): if 1 <= int(d) <= 31 and 1 <= int(m)", "Минск\"') return False if usr_info['gender'] is None: self._send_msg(u_id, 'Ваш пол?\\n пр. \"/gender 1\"", "= self.userapp_token.get_last_searched_id(user_id) if user_id is not None: usr_search.move_user_to_black(get_id) self.userapp_token.update_last_searched(user_id, None) self._ask_to_move_msg(user_id) # Messaging", "self._send_msg(u_id, f'{name}, подходящих пользователей не найдено... вернитесь чуть позже😓') return None r_usr =", "3: usr_search_list.move_user_to_archive(r_usr.vk_usr_id) return self._generate_user(u_id, name, usr_search_list, usr_cook, search_engine) self._send_msg(u_id, f'{name}, успешно нашли подходящего", "вашу днюху ☺️') return True self._send_msg(u_id, 'Дата указана неверено') return False def _c_city(self,", "= self.u_vk_api.database.getCities(country_id=country_id, q=city, count=1)['items'] self.user_app.update(u_id, ci[0]['id'], 'city') self._send_msg(u_id, f'{country} {city} ☺️') return True", "u_id, answer) -> bool: if '.' 
in answer: num = answer.split('.') if len(num)", "elif text_msg == 'ну...давай позже 😔': self._send_bye(user_id, user_firstname) elif text_msg == 'а давай", "перейти к поиску)' keyboard = VkKeyboard(one_time=True) keyboard.add_button('а давай познакомимся 🐼', color=VkKeyboardColor.POSITIVE) keyboard.add_line() keyboard.add_button('ну...давай", "None: self._ask_relation_msg(u_id) return False self._ask_to_move_msg(u_id) return True def _re_check(self, u_id, u_token): if self._check_new_usr_info(u_id):", "VKGroupManage.QUEUE.put(t) while not VKGroupManage.QUEUE.empty(): VKGroupManage.QUEUE.get().start() return True return False def _c_dob(self, u_id, answer)", "VKLaunchGroup.COMMANDS and \\ text_msg.split()[0] not in {'/dob', '/from', '/gender', '/re'}: self._unknown_command(user_id, text_msg) else:", "and user_exist: if start: self._send_msg(user_id, 'приветствую, нажмите на кнопки снизу 🐼') self._ask_to_move_msg(user_id) elif", "t = threading.Thread(target=self._search_users, args=(u_id, u_token), daemon=True) VKGroupManage.QUEUE.put(t) while not VKGroupManage.QUEUE.empty(): VKGroupManage.QUEUE.get().start() return True", "'пользователь добавлен в избраный список ⭐\\n' 'идет следующий поиск...️') self._next(user_id, user_token, user_firstname) elif", "keyboard = VkKeyboard(one_time=True) keyboard.add_button('start', color=VkKeyboardColor.SECONDARY) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _unknown_command(self, peer_id, txt_msg)", "None: message = f'{usr_name}, <NAME> вызвать меня сможете написав -> start или по", "not None: usr_search.move_user_to_favourite(get_id) self.userapp_token.update_last_searched(user_id, None) self._ask_to_move_msg(user_id) def _move_to_black(self, user_id) -> None: usr_search =", "_c_relation(self, u_id, relation) -> bool: if relation.isdigit() and int(relation) in range(1, 9): self.user_app.update(u_id,", "1 and user_exist: if start: 
self._send_msg(user_id, 'приветствую, нажмите на кнопки снизу 🐼') self._ask_to_move_msg(user_id)", "по кнопке снизу \"sign up 📝\" и ' \\ f'выдайте необходимые права 🐼", "избранное', color=VkKeyboardColor.POSITIVE) keyboard.add_line() keyboard.add_button('доб. в чс', color=VkKeyboardColor.NEGATIVE) self.vk_api.messages.send(peer_id=peer_id, message='✔️', keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _get_acquaintance(self,", "или \"/from BY Минск\"') return False if usr_info['gender'] is None: self._send_msg(u_id, 'Ваш пол?\\n", "UserSearchList(user_id, session) v_usr_cook = VkUserCook(user_token) s_engine = SearchEngine(user_id, user_token) random_id = self._generate_user(user_id, user_firstname,", "== 3 if txt_c: self._c_city(user_id, text_msg.split()[1], text_msg.split()[2]) self._re_check(user_id, user_token) elif text_msg.split()[0] == '/gender':", "ci[0]['id'], 'city') self._send_msg(u_id, f'{country} {city} ☺️') return True self._send_msg(u_id, 'Страна/город указан неверено') return", "VkKeyboardColor from vk_api.utils import get_random_id from src.vk_chat_bot.db.database import UserAppToken, UserSearchList, UserApp, session from", "'приветствую, нажмите на кнопки снизу 🐼') self._ask_to_move_msg(user_id) elif next_: self._next(user_id, user_token, user_firstname) self._ask_to_move_msg(user_id)", "int(relation) in range(1, 9): self.user_app.update(u_id, int(relation), 'relation') return True self._send_msg(u_id, 'Семейное положение указан", "usr_search.check_users_existence() is None: usr = self.user_app.get_user(u_id) s_engine.search_users_n_add_to_db(age=dt.datetime.now().year - usr.dob.year, gender=usr.gender, city=usr.city, relation=usr.relation) def", "подходящих пользователей не найдено... 
вернитесь чуть позже😓') return None r_usr = usr_search_list.select_random_row() attach", "self._ask_to_move_msg(u_id) return True def _re_check(self, u_id, u_token): if self._check_new_usr_info(u_id): self.userapp_token.update_step(u_id, 1) t =", "def _send_bye(self, peer_id, usr_name) -> None: message = f'{usr_name}, <NAME> вызвать меня сможете", "color=VkKeyboardColor.SECONDARY) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _unknown_command(self, peer_id, txt_msg) -> None: message =", "всё сложно\\n' '6 - в активном поиске\\n7 - влюблён/влюблена\\n8 - в гражданском браке\\n\\nпр.", "message=message, keyboard=None, random_id=get_random_id()) def _ask_to_move_msg(self, peer_id) -> None: keyboard = VkKeyboard(one_time=True) keyboard.add_button('свайп вправо',", "!= 3 or not 1942 <= int(user['dob'].split('.')[2]) < 2014: user['dob'] = None if", "парень') return False if usr_info['relation'] is None: self._ask_relation_msg(u_id) return False self._ask_to_move_msg(u_id) return True", "_generate_user(self, u_id, name, usr_search_list, usr_cook, search_engine): if usr_search_list.check_users_existence() is None: self._send_msg(u_id, f'{name}, подходящих", "VkBotLongPoll(self.vk, group_id=group_id) self.vk_api = self.vk.get_api() self.userapp_token = UserAppToken(session) self.user_app = UserApp(session) self.oauth_link =", "друг/есть подруга\\n3 - помолвлен/помолвлена\\n4 - женат/замужем\\n5 - всё сложно\\n' '6 - в активном", "is None: self._send_msg(u_id, 'Ваш пол?\\n пр. \"/gender 1\" -> девушка, \"/gender 2\" ->", "num[1], num[2] if d.isdigit() and m.isdigit() and y.isdigit(): if 1 <= int(d) <=", "вы? в формате => (RU,UA,BY,UZ) или (🇷🇺🇺🇦🇧🇾🇺🇿) и напишите город' '\\nпр. \"/from 🇺🇦", "'2 - есть друг/есть подруга\\n3 - помолвлен/помолвлена\\n4 - женат/замужем\\n5 - всё сложно\\n' '6", "формате: -> /dob D.M.YYYY (от 9 до 80 лет допускается)' '\\nпр. 
\"/dob 15.7.1990\"", "- в активном поиске\\n7 - влюблён/влюблена\\n8 - в гражданском браке\\n\\nпр. \"/re 6\"') self.vk_api.messages.send(peer_id=peer_id,", "self.userapp_token.get_step(user_id) start = text_msg in {'start', 'начать'} next_ = text_msg in {'next', 'свайп", "oauth_link self.u_vk_api = None def _get_firstname(self, user_id): return self.vk_api.users.get(user_ids=user_id)[0]['first_name'] def _next(self, user_id, user_token,", "<= int(y) <= 2013: self.user_app.update(u_id, answer, 'dob') self._send_msg(u_id, 'я запомню вашу днюху ☺️')", "нажмите на зеленую кнопку \"start\" ' keyboard = VkKeyboard(one_time=False) keyboard.add_openlink_button(label='sign up 📝', link=self.oauth_link)", "class VKLaunchGroup(VKGroupManage): def start(self): for event in self.long_poll.listen(): if event.type == VkBotEventType.MESSAGE_NEW: user_id", "кнопки снизу 🐼') self._ask_to_move_msg(user_id) elif next_: self._next(user_id, user_token, user_firstname) self._ask_to_move_msg(user_id) elif text_msg ==", "txt_c: self._c_relation(user_id, text_msg.split()[1]) self._re_check(user_id, user_token) else: self._resend(user_id, '/re 1-6') elif step == 1", "text_msg == 'доб. в избранное': self._move_to_fav(user_id) self._send_msg(user_id, 'пользователь добавлен в избраный список ⭐\\n'", "elif text_msg == 'доб. 
в избранное': self._move_to_fav(user_id) self._send_msg(user_id, 'пользователь добавлен в избраный список", "f'{usr_name}, <NAME> вызвать меня сможете написав -> start или по кнопке из меню", "= VkKeyboard(one_time=True) keyboard.add_button('а давай познакомимся 🐼', color=VkKeyboardColor.POSITIVE) keyboard.add_line() keyboard.add_button('ну...давай позже 😔', color=VkKeyboardColor.NEGATIVE) self.vk_api.messages.send(peer_id=peer_id,", "country['items'][0]['id'] if country_id != 0: ci = self.u_vk_api.database.getCities(country_id=country_id, q=city, count=1)['items'] self.user_app.update(u_id, ci[0]['id'], 'city')", "= VkBotLongPoll(self.vk, group_id=group_id) self.vk_api = self.vk.get_api() self.userapp_token = UserAppToken(session) self.user_app = UserApp(session) self.oauth_link", "= self.user_app.get_user(u_id) s_engine.search_users_n_add_to_db(age=dt.datetime.now().year - usr.dob.year, gender=usr.gender, city=usr.city, relation=usr.relation) def _generate_user(self, u_id, name, usr_search_list,", "message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _ask_relation_msg(self, peer_id) -> None: message = ('Ваше семейное положение?", "usr_cook, search_engine): if usr_search_list.check_users_existence() is None: self._send_msg(u_id, f'{name}, подходящих пользователей не найдено... вернитесь", "список ⭐\\n' 'идет следующий поиск...️') self._next(user_id, user_token, user_firstname) elif text_msg == 'доб. 
в", "- есть друг/есть подруга\\n3 - помолвлен/помолвлена\\n4 - женат/замужем\\n5 - всё сложно\\n' '6 -", "text_msg in {'next', 'свайп вправо'} if start and user_exist is False: self._send_msg_sign_up(user_id, user_firstname)", "в избраный список ⭐\\n' 'идет следующий поиск...️') self._next(user_id, user_token, user_firstname) elif text_msg ==", "избраный список ⭐\\n' 'идет следующий поиск...️') self._next(user_id, user_token, user_firstname) elif text_msg == 'доб.", "usr_search.move_user_to_black(get_id) self.userapp_token.update_last_searched(user_id, None) self._ask_to_move_msg(user_id) # Messaging def _send_msg(self, peer_id, message) -> None: self.vk_api.messages.send(peer_id=peer_id,", "datetime as dt import flag from queue import Queue from vk_api.bot_longpoll import VkBotLongPoll,", "if len(num) == 3: d, m, y = num[0], num[1], num[2] if d.isdigit()", "False self._ask_to_move_msg(u_id) return True def _re_check(self, u_id, u_token): if self._check_new_usr_info(u_id): self.userapp_token.update_step(u_id, 1) t", "text_msg == 'ну...давай позже 😔': self._send_bye(user_id, user_firstname) elif text_msg == 'а давай познакомимся", "_move_to_black(self, user_id) -> None: usr_search = UserSearchList(user_id, session) get_id = self.userapp_token.get_last_searched_id(user_id) if user_id", "\\ text_msg.split()[0] not in {'/dob', '/from', '/gender', '/re'}: self._unknown_command(user_id, text_msg) else: user_exist =", "= self.userapp_token.get_user_token(user_id) step = self.userapp_token.get_step(user_id) start = text_msg in {'start', 'начать'} next_ =", "/gender 2') elif text_msg.split()[0] == '/re': txt_c = len(text_msg.split()) == 2 if txt_c:", "elif text_msg == 'доб. 
в чс': self._move_to_black(user_id) self._send_msg(user_id, 'пользователь добавлен в черный список", "== 0 and user_exist: if start: self._send_msg_signed_in(user_id, user_firstname) elif text_msg == 'ну...давай позже", "= UserSearchList(u_id, session) s_engine = SearchEngine(u_id, user_token) if usr_search.check_users_existence() is None: usr =", "🐼': self._get_acquaintance(user_token) self._re_check(user_id, user_token) elif text_msg.split()[0] == '/dob': txt_c = len(text_msg.split()) == 2", "'идет следующий поиск...️') self._next(user_id, user_token, user_firstname) elif text_msg == 'доб. в чс': self._move_to_black(user_id)", "class VKGroupManage: QUEUE = Queue() COMMANDS = {'start', 'начать', 'search', 'свайп вправо', 'доб.", "None: keyboard = VkKeyboard(one_time=True) keyboard.add_button('свайп вправо', color=VkKeyboardColor.SECONDARY) keyboard.add_line() keyboard.add_button('доб. в избранное', color=VkKeyboardColor.POSITIVE) keyboard.add_line()", "txt_c: self._c_dob(user_id, text_msg.split()[1]) self._re_check(user_id, user_token) else: self._resend(user_id, '/dob D.M.YYYY') elif text_msg.split()[0] == '/from':", "self._re_check(user_id, user_token) else: self._resend(user_id, '/re 1-6') elif step == 1 and user_exist: if", "and y.isdigit(): if 1 <= int(d) <= 31 and 1 <= int(m) <=", "def _c_dob(self, u_id, answer) -> bool: if '.' in answer: num = answer.split('.')", "_c_gender(self, u_id, gender) -> bool: if gender.isdigit() and int(gender) in range(1, 3): self.user_app.update(u_id,", "None: message = f'{firstname}, мы все еще не знакомы... давай познакомимся? 🐼\\n' \\", "помолвлен/помолвлена\\n4 - женат/замужем\\n5 - всё сложно\\n' '6 - в активном поиске\\n7 - влюблён/влюблена\\n8", "-> bool: if '.' 
in answer: num = answer.split('.') if len(num) == 3:", "None self.user_app.add_user(vk_id=user['id'], firstname=user['firstname'], lastname=user['lastname'], dob=user['dob'], gender=user['gender'], city=user['city'], relation=user['relation']) return True def _check_new_usr_info(self, u_id):", "== 3: d, m, y = num[0], num[1], num[2] if d.isdigit() and m.isdigit()", "oauth_link): self.vk = vk_api.VkApi(token=vk_group_token) self.long_poll = VkBotLongPoll(self.vk, group_id=group_id) self.vk_api = self.vk.get_api() self.userapp_token =", "= self.user_app.get_user(u_id) usr_info = {'dob': usr.dob, 'city': usr.city, 'gender': usr.gender, 'relation': usr.relation} if", "'Напишите дату рождения в формате: -> /dob D.M.YYYY (от 9 до 80 лет", "if gender.isdigit() and int(gender) in range(1, 3): self.user_app.update(u_id, int(gender), 'gender') return True self._send_msg(u_id,", "r_usr.vk_usr_id class VKLaunchGroup(VKGroupManage): def start(self): for event in self.long_poll.listen(): if event.type == VkBotEventType.MESSAGE_NEW:", "= self.userapp_token.get_last_searched_id(user_id) if get_id is not None: usr_search.move_user_to_archive(get_id) self.userapp_token.update_last_searched(user_id, random_id) else: self.userapp_token.update_last_searched(user_id, random_id)", "'Страна/город указан неверено') return False def _c_gender(self, u_id, gender) -> bool: if gender.isdigit()", "' \\ f'выдайте необходимые права 🐼 после нажмите на зеленую кнопку \"start\" '", "(🇷🇺🇺🇦🇧🇾🇺🇿) и напишите город' '\\nпр. 
\"/from 🇺🇦 Киев\" или \"/from 🇷🇺 Москва\" или", "True def _check_new_usr_info(self, u_id): usr = self.user_app.get_user(u_id) usr_info = {'dob': usr.dob, 'city': usr.city,", "{usr_name}, для работы с ботом перейдите по кнопке снизу \"sign up 📝\" и", "return False def _c_city(self, u_id, country, city) -> bool: vk = vk_api.VkApi(token=self.userapp_token.get_user_token(u_id)) self.u_vk_api", "1 - 8\\n\\n1 - не женат/не замужем\\n' '2 - есть друг/есть подруга\\n3 -", "peer_id, txt_msg) -> None: message = f\"неизвестная команда '{txt_msg}' 😞\\nнапишите -> start\" keyboard", "чс', 'ну...давай позже 😔', 'а давай познакомимся 🐼'} def __init__(self, vk_group_token, group_id, oauth_link):", "relation.isdigit() and int(relation) in range(1, 9): self.user_app.update(u_id, int(relation), 'relation') return True self._send_msg(u_id, 'Семейное", "if text_msg not in VKLaunchGroup.COMMANDS and \\ text_msg.split()[0] not in {'/dob', '/from', '/gender',", "'доб. в избранное', 'доб. в чс', 'ну...давай позже 😔', 'а давай познакомимся 🐼'}", "и ' \\ f'выдайте необходимые права 🐼 после нажмите на зеленую кнопку \"start\"", "import datetime as dt import flag from queue import Queue from vk_api.bot_longpoll import", "SearchEngine(u_id, user_token) if usr_search.check_users_existence() is None: usr = self.user_app.get_user(u_id) s_engine.search_users_n_add_to_db(age=dt.datetime.now().year - usr.dob.year, gender=usr.gender,", "познакомимся 🐼', color=VkKeyboardColor.POSITIVE) keyboard.add_line() keyboard.add_button('ну...давай позже 😔', color=VkKeyboardColor.NEGATIVE) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def", "вправо'} if start and user_exist is False: self._send_msg_sign_up(user_id, user_firstname) elif step == 0", "2014: user['dob'] = None if user['gender'] == 0: user['gender'] = None if user['relation']", "'начать'} next_ = text_msg in {'next', 'свайп вправо'} if start and user_exist is", "сможете написав 
-> start или по кнопке из меню чата' keyboard = VkKeyboard(one_time=True)", "днюху ☺️') return True self._send_msg(u_id, 'Дата указана неверено') return False def _c_city(self, u_id,", "self.vk_api.messages.send(peer_id=peer_id, message='✔️', keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _get_acquaintance(self, u_token): user = VKinderUser(u_token).get_info() if user['dob'] is", "подруга\\n3 - помолвлен/помолвлена\\n4 - женат/замужем\\n5 - всё сложно\\n' '6 - в активном поиске\\n7", "_move_to_fav(self, user_id) -> None: usr_search = UserSearchList(user_id, session) get_id = self.userapp_token.get_last_searched_id(user_id) if user_id", "_search_users(self, u_id, user_token): usr_search = UserSearchList(u_id, session) s_engine = SearchEngine(u_id, user_token) if usr_search.check_users_existence()", "'/from', '/gender', '/re'}: self._unknown_command(user_id, text_msg) else: user_exist = self.userapp_token.check_user(user_id) user_token = self.userapp_token.get_user_token(user_id) step", "'ну...давай позже 😔', 'а давай познакомимся 🐼'} def __init__(self, vk_group_token, group_id, oauth_link): self.vk", "🇺🇦 Киев\" или \"/from 🇷🇺 Москва\" или \"/from BY Минск\"') return False if", "'свайп вправо', 'доб. в избранное', 'доб. 
в чс', 'ну...давай позже 😔', 'а давай", "или \"/from 🇷🇺 Москва\" или \"/from BY Минск\"') return False if usr_info['gender'] is", "self._move_to_black(user_id) self._send_msg(user_id, 'пользователь добавлен в черный список 🌚\\n' 'идет следующий поиск...') self._next(user_id, user_token,", "threading.Thread(target=self._search_users, args=(u_id, u_token), daemon=True) VKGroupManage.QUEUE.put(t) while not VKGroupManage.QUEUE.empty(): VKGroupManage.QUEUE.get().start() return True return False", "message = f'{usr_name}, <NAME> вызвать меня сможете написав -> start или по кнопке", "r_usr = usr_search_list.select_random_row() attach = usr_cook.get_user_photos(r_usr.vk_usr_id) if len(attach) != 3: usr_search_list.move_user_to_archive(r_usr.vk_usr_id) return self._generate_user(u_id,", "self.vk.get_api() self.userapp_token = UserAppToken(session) self.user_app = UserApp(session) self.oauth_link = oauth_link self.u_vk_api = None", "= None def _get_firstname(self, user_id): return self.vk_api.users.get(user_ids=user_id)[0]['first_name'] def _next(self, user_id, user_token, user_firstname) ->", "чата' keyboard = VkKeyboard(one_time=True) keyboard.add_button('start', color=VkKeyboardColor.SECONDARY) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _unknown_command(self, peer_id,", "while not VKGroupManage.QUEUE.empty(): VKGroupManage.QUEUE.get().start() return True return False def _c_dob(self, u_id, answer) ->", "return False if usr_info['city'] is None: self._send_msg(u_id, 'Откуда вы? 
в формате => (RU,UA,BY,UZ)", "not in {'/dob', '/from', '/gender', '/re'}: self._unknown_command(user_id, text_msg) else: user_exist = self.userapp_token.check_user(user_id) user_token", "else: self.userapp_token.update_last_searched(user_id, random_id) self._ask_to_move_msg(user_id) def _move_to_fav(self, user_id) -> None: usr_search = UserSearchList(user_id, session)", "event.type == VkBotEventType.MESSAGE_NEW: user_id = event.obj['message']['peer_id'] user_firstname = self._get_firstname(user_id) text_msg = event.obj['message']['text'].strip().lower() print(f\"New", "def _c_relation(self, u_id, relation) -> bool: if relation.isdigit() and int(relation) in range(1, 9):", "is not None: usr_search.move_user_to_archive(get_id) self.userapp_token.update_last_searched(user_id, random_id) else: self.userapp_token.update_last_searched(user_id, random_id) self._ask_to_move_msg(user_id) def _move_to_fav(self, user_id)", "позже 😔', color=VkKeyboardColor.NEGATIVE) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _send_bye(self, peer_id, usr_name) -> None:", "None: usr_search.move_user_to_archive(get_id) self.userapp_token.update_last_searched(user_id, random_id) else: self.userapp_token.update_last_searched(user_id, random_id) self._ask_to_move_msg(user_id) def _move_to_fav(self, user_id) -> None:", "-> start или по кнопке из меню чата' keyboard = VkKeyboard(one_time=True) keyboard.add_button('start', color=VkKeyboardColor.SECONDARY)", "познакомимся 🐼': self._get_acquaintance(user_token) self._re_check(user_id, user_token) elif text_msg.split()[0] == '/dob': txt_c = len(text_msg.split()) ==", "-> None: message = f'Хаю-Хай 🐍 {usr_name}, для работы с ботом перейдите по", "start: self._send_msg(user_id, 'приветствую, нажмите на кнопки снизу 🐼') self._ask_to_move_msg(user_id) elif next_: self._next(user_id, user_token,", "в избранное': self._move_to_fav(user_id) self._send_msg(user_id, 'пользователь 
добавлен в избраный список ⭐\\n' 'идет следующий поиск...️')", "relation=usr.relation) def _generate_user(self, u_id, name, usr_search_list, usr_cook, search_engine): if usr_search_list.check_users_existence() is None: self._send_msg(u_id,", "еще не знакомы... давай познакомимся? 🐼\\n' \\ f'(нужно познакомится с пандой, чтобы перейти", "1 <= int(m) <= 12 and 1942 <= int(y) <= 2013: self.user_app.update(u_id, answer,", "user_firstname) self._ask_to_move_msg(user_id) elif text_msg == 'доб. в избранное': self._move_to_fav(user_id) self._send_msg(user_id, 'пользователь добавлен в", "UserSearchList(u_id, session) s_engine = SearchEngine(u_id, user_token) if usr_search.check_users_existence() is None: usr = self.user_app.get_user(u_id)", "на зеленую кнопку \"start\" ' keyboard = VkKeyboard(one_time=False) keyboard.add_openlink_button(label='sign up 📝', link=self.oauth_link) keyboard.add_button('start',", "указана неверено') return False def _c_city(self, u_id, country, city) -> bool: vk =", "in range(1, 3): self.user_app.update(u_id, int(gender), 'gender') return True self._send_msg(u_id, 'Неверный пол') return False", "keyboard.add_button('доб. в избранное', color=VkKeyboardColor.POSITIVE) keyboard.add_line() keyboard.add_button('доб. в чс', color=VkKeyboardColor.NEGATIVE) self.vk_api.messages.send(peer_id=peer_id, message='✔️', keyboard=keyboard.get_keyboard(), random_id=get_random_id())", "- в гражданском браке\\n\\nпр. \"/re 6\"') self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None, random_id=get_random_id()) def _ask_to_move_msg(self, peer_id)", "женат/замужем\\n5 - всё сложно\\n' '6 - в активном поиске\\n7 - влюблён/влюблена\\n8 - в", "не женат/не замужем\\n' '2 - есть друг/есть подруга\\n3 - помолвлен/помолвлена\\n4 - женат/замужем\\n5 -", "VKGroupManage.QUEUE.empty(): VKGroupManage.QUEUE.get().start() return True return False def _c_dob(self, u_id, answer) -> bool: if", "'\\nпр. 
\"/dob 15.7.1990\" ') return False if usr_info['city'] is None: self._send_msg(u_id, 'Откуда вы?", "== 'доб. в избранное': self._move_to_fav(user_id) self._send_msg(user_id, 'пользователь добавлен в избраный список ⭐\\n' 'идет", "_send_msg(self, peer_id, message) -> None: self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None, random_id=get_random_id()) def _resend(self, peer_id, value:", "Messaging def _send_msg(self, peer_id, message) -> None: self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None, random_id=get_random_id()) def _resend(self,", "замужем\\n' '2 - есть друг/есть подруга\\n3 - помолвлен/помолвлена\\n4 - женат/замужем\\n5 - всё сложно\\n'", "self.userapp_token.get_last_searched_id(user_id) if user_id is not None: usr_search.move_user_to_favourite(get_id) self.userapp_token.update_last_searched(user_id, None) self._ask_to_move_msg(user_id) def _move_to_black(self, user_id)", "y.isdigit(): if 1 <= int(d) <= 31 and 1 <= int(m) <= 12", "msg from {user_id}, text: {text_msg} \") if text_msg not in VKLaunchGroup.COMMANDS and \\", "'/re': txt_c = len(text_msg.split()) == 2 if txt_c: self._c_relation(user_id, text_msg.split()[1]) self._re_check(user_id, user_token) else:", "self._move_to_fav(user_id) self._send_msg(user_id, 'пользователь добавлен в избраный список ⭐\\n' 'идет следующий поиск...️') self._next(user_id, user_token,", "'Откуда вы? в формате => (RU,UA,BY,UZ) или (🇷🇺🇺🇦🇧🇾🇺🇿) и напишите город' '\\nпр. 
\"/from", "{r_usr.lastname}]', attachment=attach, random_id=get_random_id()) return r_usr.vk_usr_id class VKLaunchGroup(VKGroupManage): def start(self): for event in self.long_poll.listen():", "self._send_msg_signed_in(user_id, user_firstname) elif text_msg == 'ну...давай позже 😔': self._send_bye(user_id, user_firstname) elif text_msg ==", "UserSearchList, UserApp, session from src.vk_chat_bot.vk.vkontakte import SearchEngine, VKinderUser, VkUserCook class VKGroupManage: QUEUE =", "указан неверно') return False def _search_users(self, u_id, user_token): usr_search = UserSearchList(u_id, session) s_engine", "search_engine) self._send_msg(u_id, f'{name}, успешно нашли подходящего пользователей 🐼') self.vk_api.messages.send(peer_id=u_id, message=f'[id{r_usr.vk_usr_id}|{r_usr.firstname} {r_usr.lastname}]', attachment=attach, random_id=get_random_id())", "self.userapp_token.get_last_searched_id(user_id) if user_id is not None: usr_search.move_user_to_black(get_id) self.userapp_token.update_last_searched(user_id, None) self._ask_to_move_msg(user_id) # Messaging def", "num = answer.split('.') if len(num) == 3: d, m, y = num[0], num[1],", "поиск...️') self._next(user_id, user_token, user_firstname) elif text_msg == 'доб. 
в чс': self._move_to_black(user_id) self._send_msg(user_id, 'пользователь", "elif text_msg.split()[0] == '/from': txt_c = len(text_msg.split()) == 3 if txt_c: self._c_city(user_id, text_msg.split()[1],", "peer_id, usr_name) -> None: message = f'Хаю-Хай 🐍 {usr_name}, для работы с ботом", "country_id = country['items'][0]['id'] if country_id != 0: ci = self.u_vk_api.database.getCities(country_id=country_id, q=city, count=1)['items'] self.user_app.update(u_id,", "keyboard = VkKeyboard(one_time=False) keyboard.add_button('start', color=VkKeyboardColor.SECONDARY) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _ask_relation_msg(self, peer_id) ->", "ci = self.u_vk_api.database.getCities(country_id=country_id, q=city, count=1)['items'] self.user_app.update(u_id, ci[0]['id'], 'city') self._send_msg(u_id, f'{country} {city} ☺️') return", "🐼') self._ask_to_move_msg(user_id) elif next_: self._next(user_id, user_token, user_firstname) self._ask_to_move_msg(user_id) elif text_msg == 'доб. в", "self.u_vk_api.database.getCities(country_id=country_id, q=city, count=1)['items'] self.user_app.update(u_id, ci[0]['id'], 'city') self._send_msg(u_id, f'{country} {city} ☺️') return True self._send_msg(u_id,", "message=f'[id{r_usr.vk_usr_id}|{r_usr.firstname} {r_usr.lastname}]', attachment=attach, random_id=get_random_id()) return r_usr.vk_usr_id class VKLaunchGroup(VKGroupManage): def start(self): for event in", "num[2] if d.isdigit() and m.isdigit() and y.isdigit(): if 1 <= int(d) <= 31", "import vk_api import datetime as dt import flag from queue import Queue from", "self._re_check(user_id, user_token) else: self._resend(user_id, '/gender 1 или /gender 2') elif text_msg.split()[0] == '/re':", "допускается)' '\\nпр. 
\"/dob 15.7.1990\" ') return False if usr_info['city'] is None: self._send_msg(u_id, 'Откуда", "девушка, \"/gender 2\" -> парень') return False if usr_info['relation'] is None: self._ask_relation_msg(u_id) return", "message = f\"неизвестная команда '{txt_msg}' 😞\\nнапишите -> start\" keyboard = VkKeyboard(one_time=False) keyboard.add_button('start', color=VkKeyboardColor.SECONDARY)", "9 до 80 лет допускается)' '\\nпр. \"/dob 15.7.1990\" ') return False if usr_info['city']", "в гражданском браке\\n\\nпр. \"/re 6\"') self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None, random_id=get_random_id()) def _ask_to_move_msg(self, peer_id) ->", "len(num) == 3: d, m, y = num[0], num[1], num[2] if d.isdigit() and", "= len(text_msg.split()) == 3 if txt_c: self._c_city(user_id, text_msg.split()[1], text_msg.split()[2]) self._re_check(user_id, user_token) elif text_msg.split()[0]", "0: user['relation'] = None self.user_app.add_user(vk_id=user['id'], firstname=user['firstname'], lastname=user['lastname'], dob=user['dob'], gender=user['gender'], city=user['city'], relation=user['relation']) return True", "import get_random_id from src.vk_chat_bot.db.database import UserAppToken, UserSearchList, UserApp, session from src.vk_chat_bot.vk.vkontakte import SearchEngine,", "1-6') elif step == 1 and user_exist: if start: self._send_msg(user_id, 'приветствую, нажмите на", "= Queue() COMMANDS = {'start', 'начать', 'search', 'свайп вправо', 'доб. 
в избранное', 'доб.", "if start and user_exist is False: self._send_msg_sign_up(user_id, user_firstname) elif step == 0 and", "-> None: self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None, random_id=get_random_id()) def _resend(self, peer_id, value: str): message =", "keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _send_msg_signed_in(self, peer_id, firstname) -> None: message = f'{firstname}, мы все", "d.isdigit() and m.isdigit() and y.isdigit(): if 1 <= int(d) <= 31 and 1", "flag.dflagize(f\"{country.strip()}\", subregions=True) country = self.u_vk_api.database.getCountries(code=country_flag) country_id = country['items'][0]['id'] if country_id != 0: ci", "def __init__(self, vk_group_token, group_id, oauth_link): self.vk = vk_api.VkApi(token=vk_group_token) self.long_poll = VkBotLongPoll(self.vk, group_id=group_id) self.vk_api", "зеленую кнопку \"start\" ' keyboard = VkKeyboard(one_time=False) keyboard.add_openlink_button(label='sign up 📝', link=self.oauth_link) keyboard.add_button('start', color=VkKeyboardColor.POSITIVE)", "VkBotLongPoll, VkBotEventType from vk_api.keyboard import VkKeyboard, VkKeyboardColor from vk_api.utils import get_random_id from src.vk_chat_bot.db.database", "'relation') return True self._send_msg(u_id, 'Семейное положение указан неверно') return False def _search_users(self, u_id,", "self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None, random_id=get_random_id()) def _resend(self, peer_id, value: str): message = f'Неверный формат,", "= country['items'][0]['id'] if country_id != 0: ci = self.u_vk_api.database.getCities(country_id=country_id, q=city, count=1)['items'] self.user_app.update(u_id, ci[0]['id'],", "VkKeyboard, VkKeyboardColor from vk_api.utils import get_random_id from src.vk_chat_bot.db.database import UserAppToken, UserSearchList, UserApp, session", "start: self._send_msg_signed_in(user_id, user_firstname) elif text_msg == 'ну...давай позже 😔': self._send_bye(user_id, 
user_firstname) elif text_msg", "step == 0 and user_exist: if start: self._send_msg_signed_in(user_id, user_firstname) elif text_msg == 'ну...давай", "self._ask_relation_msg(u_id) return False self._ask_to_move_msg(u_id) return True def _re_check(self, u_id, u_token): if self._check_new_usr_info(u_id): self.userapp_token.update_step(u_id,", "text_msg.split()[0] == '/re': txt_c = len(text_msg.split()) == 2 if txt_c: self._c_relation(user_id, text_msg.split()[1]) self._re_check(user_id,", "gender.isdigit() and int(gender) in range(1, 3): self.user_app.update(u_id, int(gender), 'gender') return True self._send_msg(u_id, 'Неверный", "в формате => (RU,UA,BY,UZ) или (🇷🇺🇺🇦🇧🇾🇺🇿) и напишите город' '\\nпр. \"/from 🇺🇦 Киев\"", "self._send_msg(u_id, 'Дата указана неверено') return False def _c_city(self, u_id, country, city) -> bool:", "txt_c: self._c_city(user_id, text_msg.split()[1], text_msg.split()[2]) self._re_check(user_id, user_token) elif text_msg.split()[0] == '/gender': txt_c = len(text_msg.split())", "влюблён/влюблена\\n8 - в гражданском браке\\n\\nпр. \"/re 6\"') self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None, random_id=get_random_id()) def _ask_to_move_msg(self,", "3): self.user_app.update(u_id, int(gender), 'gender') return True self._send_msg(u_id, 'Неверный пол') return False def _c_relation(self,", "-> bool: vk = vk_api.VkApi(token=self.userapp_token.get_user_token(u_id)) self.u_vk_api = vk.get_api() country_flag = flag.dflagize(f\"{country.strip()}\", subregions=True) country", "None: self._send_msg(u_id, 'Откуда вы? 
в формате => (RU,UA,BY,UZ) или (🇷🇺🇺🇦🇧🇾🇺🇿) и напишите город'", "user_id = event.obj['message']['peer_id'] user_firstname = self._get_firstname(user_id) text_msg = event.obj['message']['text'].strip().lower() print(f\"New msg from {user_id},", "step = self.userapp_token.get_step(user_id) start = text_msg in {'start', 'начать'} next_ = text_msg in", "# Messaging def _send_msg(self, peer_id, message) -> None: self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None, random_id=get_random_id()) def", "в черный список 🌚\\n' 'идет следующий поиск...') self._next(user_id, user_token, user_firstname) if __name__ ==", "🐍 {usr_name}, для работы с ботом перейдите по кнопке снизу \"sign up 📝\"", "not 1942 <= int(user['dob'].split('.')[2]) < 2014: user['dob'] = None if user['gender'] == 0:", "🇷🇺 Москва\" или \"/from BY Минск\"') return False if usr_info['gender'] is None: self._send_msg(u_id,", "== 'а давай познакомимся 🐼': self._get_acquaintance(user_token) self._re_check(user_id, user_token) elif text_msg.split()[0] == '/dob': txt_c", "' keyboard = VkKeyboard(one_time=False) keyboard.add_openlink_button(label='sign up 📝', link=self.oauth_link) keyboard.add_button('start', color=VkKeyboardColor.POSITIVE) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(),", "name, usr_search_list, usr_cook, search_engine): if usr_search_list.check_users_existence() is None: self._send_msg(u_id, f'{name}, подходящих пользователей не", "usr_search.move_user_to_favourite(get_id) self.userapp_token.update_last_searched(user_id, None) self._ask_to_move_msg(user_id) def _move_to_black(self, user_id) -> None: usr_search = UserSearchList(user_id, session)", "u_id, country, city) -> bool: vk = vk_api.VkApi(token=self.userapp_token.get_user_token(u_id)) self.u_vk_api = vk.get_api() country_flag =", "-> bool: if gender.isdigit() and int(gender) in range(1, 3): self.user_app.update(u_id, int(gender), 'gender') return", "\\ f'выдайте необходимые 
права 🐼 после нажмите на зеленую кнопку \"start\" ' keyboard", "as dt import flag from queue import Queue from vk_api.bot_longpoll import VkBotLongPoll, VkBotEventType", "self._send_bye(user_id, user_firstname) elif text_msg == 'а давай познакомимся 🐼': self._get_acquaintance(user_token) self._re_check(user_id, user_token) elif", "\"/re\" и цифру от 1 - 8\\n\\n1 - не женат/не замужем\\n' '2 -", "- влюблён/влюблена\\n8 - в гражданском браке\\n\\nпр. \"/re 6\"') self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None, random_id=get_random_id()) def", "_get_acquaintance(self, u_token): user = VKinderUser(u_token).get_info() if user['dob'] is None or len(user['dob'].split('.')) != 3", "= len(text_msg.split()) == 2 if txt_c: self._c_gender(user_id, text_msg.split()[1]) self._re_check(user_id, user_token) else: self._resend(user_id, '/gender", "text_msg not in VKLaunchGroup.COMMANDS and \\ text_msg.split()[0] not in {'/dob', '/from', '/gender', '/re'}:", "None def _get_firstname(self, user_id): return self.vk_api.users.get(user_ids=user_id)[0]['first_name'] def _next(self, user_id, user_token, user_firstname) -> None:", "позже 😔', 'а давай познакомимся 🐼'} def __init__(self, vk_group_token, group_id, oauth_link): self.vk =", "<NAME> вызвать меня сможете написав -> start или по кнопке из меню чата'", "usr.relation} if usr_info['dob'] is None: self._send_msg(u_id, 'Напишите дату рождения в формате: -> /dob", "return False if usr_info['relation'] is None: self._ask_relation_msg(u_id) return False self._ask_to_move_msg(u_id) return True def", "y = num[0], num[1], num[2] if d.isdigit() and m.isdigit() and y.isdigit(): if 1", "self.user_app.update(u_id, int(gender), 'gender') return True self._send_msg(u_id, 'Неверный пол') return False def _c_relation(self, u_id,", "from vk_api.bot_longpoll import VkBotLongPoll, VkBotEventType from vk_api.keyboard import VkKeyboard, VkKeyboardColor from vk_api.utils import", "user['dob'] is None or 
len(user['dob'].split('.')) != 3 or not 1942 <= int(user['dob'].split('.')[2]) <", "'Неверный пол') return False def _c_relation(self, u_id, relation) -> bool: if relation.isdigit() and", "self._send_msg(u_id, 'Семейное положение указан неверно') return False def _search_users(self, u_id, user_token): usr_search =", "text_msg.split()[1], text_msg.split()[2]) self._re_check(user_id, user_token) elif text_msg.split()[0] == '/gender': txt_c = len(text_msg.split()) == 2", "'.' in answer: num = answer.split('.') if len(num) == 3: d, m, y", "_resend(self, peer_id, value: str): message = f'Неверный формат, правильный формат: {value}' self.vk_api.messages.send(peer_id=peer_id, message=message,", "VkBotEventType from vk_api.keyboard import VkKeyboard, VkKeyboardColor from vk_api.utils import get_random_id from src.vk_chat_bot.db.database import", "country_flag = flag.dflagize(f\"{country.strip()}\", subregions=True) country = self.u_vk_api.database.getCountries(code=country_flag) country_id = country['items'][0]['id'] if country_id !=", "🐼') self.vk_api.messages.send(peer_id=u_id, message=f'[id{r_usr.vk_usr_id}|{r_usr.firstname} {r_usr.lastname}]', attachment=attach, random_id=get_random_id()) return r_usr.vk_usr_id class VKLaunchGroup(VKGroupManage): def start(self): for", "self._get_firstname(user_id) text_msg = event.obj['message']['text'].strip().lower() print(f\"New msg from {user_id}, text: {text_msg} \") if text_msg", "вправо', color=VkKeyboardColor.SECONDARY) keyboard.add_line() keyboard.add_button('доб. в избранное', color=VkKeyboardColor.POSITIVE) keyboard.add_line() keyboard.add_button('доб. 
в чс', color=VkKeyboardColor.NEGATIVE) self.vk_api.messages.send(peer_id=peer_id,", "usr.gender, 'relation': usr.relation} if usr_info['dob'] is None: self._send_msg(u_id, 'Напишите дату рождения в формате:", "self._send_msg(u_id, f'{name}, успешно нашли подходящего пользователей 🐼') self.vk_api.messages.send(peer_id=u_id, message=f'[id{r_usr.vk_usr_id}|{r_usr.firstname} {r_usr.lastname}]', attachment=attach, random_id=get_random_id()) return", "self._re_check(user_id, user_token) elif text_msg.split()[0] == '/dob': txt_c = len(text_msg.split()) == 2 if txt_c:", "user_token) elif text_msg.split()[0] == '/gender': txt_c = len(text_msg.split()) == 2 if txt_c: self._c_gender(user_id,", "гражданском браке\\n\\nпр. \"/re 6\"') self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None, random_id=get_random_id()) def _ask_to_move_msg(self, peer_id) -> None:", "answer) -> bool: if '.' in answer: num = answer.split('.') if len(num) ==", "attachment=attach, random_id=get_random_id()) return r_usr.vk_usr_id class VKLaunchGroup(VKGroupManage): def start(self): for event in self.long_poll.listen(): if", "Queue() COMMANDS = {'start', 'начать', 'search', 'свайп вправо', 'доб. в избранное', 'доб. 
в", "None if user['relation'] == 0: user['relation'] = None self.user_app.add_user(vk_id=user['id'], firstname=user['firstname'], lastname=user['lastname'], dob=user['dob'], gender=user['gender'],", "self.long_poll = VkBotLongPoll(self.vk, group_id=group_id) self.vk_api = self.vk.get_api() self.userapp_token = UserAppToken(session) self.user_app = UserApp(session)", "= text_msg in {'start', 'начать'} next_ = text_msg in {'next', 'свайп вправо'} if", "True self._send_msg(u_id, 'Неверный пол') return False def _c_relation(self, u_id, relation) -> bool: if", "\\ f'(нужно познакомится с пандой, чтобы перейти к поиску)' keyboard = VkKeyboard(one_time=True) keyboard.add_button('а", "'relation': usr.relation} if usr_info['dob'] is None: self._send_msg(u_id, 'Напишите дату рождения в формате: ->", "-> /dob D.M.YYYY (от 9 до 80 лет допускается)' '\\nпр. \"/dob 15.7.1990\" ')", "= event.obj['message']['text'].strip().lower() print(f\"New msg from {user_id}, text: {text_msg} \") if text_msg not in", "is not None: usr_search.move_user_to_favourite(get_id) self.userapp_token.update_last_searched(user_id, None) self._ask_to_move_msg(user_id) def _move_to_black(self, user_id) -> None: usr_search", "None: usr_search = UserSearchList(user_id, session) v_usr_cook = VkUserCook(user_token) s_engine = SearchEngine(user_id, user_token) random_id", "= vk_api.VkApi(token=vk_group_token) self.long_poll = VkBotLongPoll(self.vk, group_id=group_id) self.vk_api = self.vk.get_api() self.userapp_token = UserAppToken(session) self.user_app", "рождения в формате: -> /dob D.M.YYYY (от 9 до 80 лет допускается)' '\\nпр.", "user_exist = self.userapp_token.check_user(user_id) user_token = self.userapp_token.get_user_token(user_id) step = self.userapp_token.get_step(user_id) start = text_msg in", "{'start', 'начать', 'search', 'свайп вправо', 'доб. в избранное', 'доб. 
в чс', 'ну...давай позже", "self._send_msg(user_id, 'пользователь добавлен в черный список 🌚\\n' 'идет следующий поиск...') self._next(user_id, user_token, user_firstname)", "user['gender'] = None if user['relation'] == 0: user['relation'] = None self.user_app.add_user(vk_id=user['id'], firstname=user['firstname'], lastname=user['lastname'],", "15.7.1990\" ') return False if usr_info['city'] is None: self._send_msg(u_id, 'Откуда вы? в формате", "else: self._resend(user_id, '/gender 1 или /gender 2') elif text_msg.split()[0] == '/re': txt_c =", "event.obj['message']['peer_id'] user_firstname = self._get_firstname(user_id) text_msg = event.obj['message']['text'].strip().lower() print(f\"New msg from {user_id}, text: {text_msg}", "self.userapp_token.update_last_searched(user_id, None) self._ask_to_move_msg(user_id) def _move_to_black(self, user_id) -> None: usr_search = UserSearchList(user_id, session) get_id", "self.vk_api = self.vk.get_api() self.userapp_token = UserAppToken(session) self.user_app = UserApp(session) self.oauth_link = oauth_link self.u_vk_api", "не найдено... вернитесь чуть позже😓') return None r_usr = usr_search_list.select_random_row() attach = usr_cook.get_user_photos(r_usr.vk_usr_id)", "self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _ask_relation_msg(self, peer_id) -> None: message = ('Ваше семейное", "запомню вашу днюху ☺️') return True self._send_msg(u_id, 'Дата указана неверено') return False def", "subregions=True) country = self.u_vk_api.database.getCountries(code=country_flag) country_id = country['items'][0]['id'] if country_id != 0: ci =", "VkKeyboard(one_time=True) keyboard.add_button('свайп вправо', color=VkKeyboardColor.SECONDARY) keyboard.add_line() keyboard.add_button('доб. в избранное', color=VkKeyboardColor.POSITIVE) keyboard.add_line() keyboard.add_button('доб. 
в чс',", "usr_cook, search_engine) self._send_msg(u_id, f'{name}, успешно нашли подходящего пользователей 🐼') self.vk_api.messages.send(peer_id=u_id, message=f'[id{r_usr.vk_usr_id}|{r_usr.firstname} {r_usr.lastname}]', attachment=attach,", "self.u_vk_api = vk.get_api() country_flag = flag.dflagize(f\"{country.strip()}\", subregions=True) country = self.u_vk_api.database.getCountries(code=country_flag) country_id = country['items'][0]['id']", "def _get_acquaintance(self, u_token): user = VKinderUser(u_token).get_info() if user['dob'] is None or len(user['dob'].split('.')) !=", "return True self._send_msg(u_id, 'Дата указана неверено') return False def _c_city(self, u_id, country, city)", "keyboard=None, random_id=get_random_id()) def _send_msg_sign_up(self, peer_id, usr_name) -> None: message = f'Хаю-Хай 🐍 {usr_name},", "usr_search_list.check_users_existence() is None: self._send_msg(u_id, f'{name}, подходящих пользователей не найдено... вернитесь чуть позже😓') return", "vk_api.bot_longpoll import VkBotLongPoll, VkBotEventType from vk_api.keyboard import VkKeyboard, VkKeyboardColor from vk_api.utils import get_random_id", "\"/dob 15.7.1990\" ') return False if usr_info['city'] is None: self._send_msg(u_id, 'Откуда вы? в", "answer: num = answer.split('.') if len(num) == 3: d, m, y = num[0],", "name, usr_search_list, usr_cook, search_engine) self._send_msg(u_id, f'{name}, успешно нашли подходящего пользователей 🐼') self.vk_api.messages.send(peer_id=u_id, message=f'[id{r_usr.vk_usr_id}|{r_usr.firstname}", "usr_info['dob'] is None: self._send_msg(u_id, 'Напишите дату рождения в формате: -> /dob D.M.YYYY (от", "firstname) -> None: message = f'{firstname}, мы все еще не знакомы... давай познакомимся?", "self._send_msg(u_id, 'Напишите дату рождения в формате: -> /dob D.M.YYYY (от 9 до 80", "лет допускается)' '\\nпр. 
\"/dob 15.7.1990\" ') return False if usr_info['city'] is None: self._send_msg(u_id,", "user_token) else: self._resend(user_id, '/dob D.M.YYYY') elif text_msg.split()[0] == '/from': txt_c = len(text_msg.split()) ==", "-> None: message = f'{usr_name}, <NAME> вызвать меня сможете написав -> start или", "меня сможете написав -> start или по кнопке из меню чата' keyboard =", "женат/не замужем\\n' '2 - есть друг/есть подруга\\n3 - помолвлен/помолвлена\\n4 - женат/замужем\\n5 - всё", "f'Неверный формат, правильный формат: {value}' self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None, random_id=get_random_id()) def _send_msg_sign_up(self, peer_id, usr_name)", "def _ask_relation_msg(self, peer_id) -> None: message = ('Ваше семейное положение? Отправьте \"/re\" и", "self._unknown_command(user_id, text_msg) else: user_exist = self.userapp_token.check_user(user_id) user_token = self.userapp_token.get_user_token(user_id) step = self.userapp_token.get_step(user_id) start", "if user_id is not None: usr_search.move_user_to_favourite(get_id) self.userapp_token.update_last_searched(user_id, None) self._ask_to_move_msg(user_id) def _move_to_black(self, user_id) ->", "self.user_app.get_user(u_id) usr_info = {'dob': usr.dob, 'city': usr.city, 'gender': usr.gender, 'relation': usr.relation} if usr_info['dob']", "return False def _c_dob(self, u_id, answer) -> bool: if '.' in answer: num", "на кнопки снизу 🐼') self._ask_to_move_msg(user_id) elif next_: self._next(user_id, user_token, user_firstname) self._ask_to_move_msg(user_id) elif text_msg", "self.u_vk_api = None def _get_firstname(self, user_id): return self.vk_api.users.get(user_ids=user_id)[0]['first_name'] def _next(self, user_id, user_token, user_firstname)", "keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _ask_relation_msg(self, peer_id) -> None: message = ('Ваше семейное положение? 
Отправьте", "queue import Queue from vk_api.bot_longpoll import VkBotLongPoll, VkBotEventType from vk_api.keyboard import VkKeyboard, VkKeyboardColor", "= VkUserCook(user_token) s_engine = SearchEngine(user_id, user_token) random_id = self._generate_user(user_id, user_firstname, usr_search, v_usr_cook, s_engine)", "from src.vk_chat_bot.db.database import UserAppToken, UserSearchList, UserApp, session from src.vk_chat_bot.vk.vkontakte import SearchEngine, VKinderUser, VkUserCook", "usr_search.move_user_to_archive(get_id) self.userapp_token.update_last_searched(user_id, random_id) else: self.userapp_token.update_last_searched(user_id, random_id) self._ask_to_move_msg(user_id) def _move_to_fav(self, user_id) -> None: usr_search", "= VkKeyboard(one_time=True) keyboard.add_button('start', color=VkKeyboardColor.SECONDARY) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _unknown_command(self, peer_id, txt_msg) ->", "text_msg.split()[0] == '/gender': txt_c = len(text_msg.split()) == 2 if txt_c: self._c_gender(user_id, text_msg.split()[1]) self._re_check(user_id,", "and int(relation) in range(1, 9): self.user_app.update(u_id, int(relation), 'relation') return True self._send_msg(u_id, 'Семейное положение", "список 🌚\\n' 'идет следующий поиск...') self._next(user_id, user_token, user_firstname) if __name__ == '__main__': pass", "чтобы перейти к поиску)' keyboard = VkKeyboard(one_time=True) keyboard.add_button('а давай познакомимся 🐼', color=VkKeyboardColor.POSITIVE) keyboard.add_line()", "'я запомню вашу днюху ☺️') return True self._send_msg(u_id, 'Дата указана неверено') return False", "'6 - в активном поиске\\n7 - влюблён/влюблена\\n8 - в гражданском браке\\n\\nпр. \"/re 6\"')", "/dob D.M.YYYY (от 9 до 80 лет допускается)' '\\nпр. 
\"/dob 15.7.1990\" ') return", "keyboard = VkKeyboard(one_time=True) keyboard.add_button('а давай познакомимся 🐼', color=VkKeyboardColor.POSITIVE) keyboard.add_line() keyboard.add_button('ну...давай позже 😔', color=VkKeyboardColor.NEGATIVE)", "и цифру от 1 - 8\\n\\n1 - не женат/не замужем\\n' '2 - есть", "text: {text_msg} \") if text_msg not in VKLaunchGroup.COMMANDS and \\ text_msg.split()[0] not in", "city=user['city'], relation=user['relation']) return True def _check_new_usr_info(self, u_id): usr = self.user_app.get_user(u_id) usr_info = {'dob':", "поиску)' keyboard = VkKeyboard(one_time=True) keyboard.add_button('а давай познакомимся 🐼', color=VkKeyboardColor.POSITIVE) keyboard.add_line() keyboard.add_button('ну...давай позже 😔',", "is None: self._ask_relation_msg(u_id) return False self._ask_to_move_msg(u_id) return True def _re_check(self, u_id, u_token): if", "None: usr_search = UserSearchList(user_id, session) get_id = self.userapp_token.get_last_searched_id(user_id) if user_id is not None:", "int(gender), 'gender') return True self._send_msg(u_id, 'Неверный пол') return False def _c_relation(self, u_id, relation)", "peer_id, value: str): message = f'Неверный формат, правильный формат: {value}' self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None,", "self._generate_user(user_id, user_firstname, usr_search, v_usr_cook, s_engine) get_id = self.userapp_token.get_last_searched_id(user_id) if get_id is not None:", "UserAppToken, UserSearchList, UserApp, session from src.vk_chat_bot.vk.vkontakte import SearchEngine, VKinderUser, VkUserCook class VKGroupManage: QUEUE", "VkUserCook class VKGroupManage: QUEUE = Queue() COMMANDS = {'start', 'начать', 'search', 'свайп вправо',", "f'{firstname}, мы все еще не знакомы... давай познакомимся? 
🐼\\n' \\ f'(нужно познакомится с", "or len(user['dob'].split('.')) != 3 or not 1942 <= int(user['dob'].split('.')[2]) < 2014: user['dob'] =", "int(user['dob'].split('.')[2]) < 2014: user['dob'] = None if user['gender'] == 0: user['gender'] = None", "= self.userapp_token.get_step(user_id) start = text_msg in {'start', 'начать'} next_ = text_msg in {'next',", "к поиску)' keyboard = VkKeyboard(one_time=True) keyboard.add_button('а давай познакомимся 🐼', color=VkKeyboardColor.POSITIVE) keyboard.add_line() keyboard.add_button('ну...давай позже", "start and user_exist is False: self._send_msg_sign_up(user_id, user_firstname) elif step == 0 and user_exist:", "2 if txt_c: self._c_relation(user_id, text_msg.split()[1]) self._re_check(user_id, user_token) else: self._resend(user_id, '/re 1-6') elif step", "self._ask_to_move_msg(user_id) # Messaging def _send_msg(self, peer_id, message) -> None: self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None, random_id=get_random_id())", "elif text_msg.split()[0] == '/dob': txt_c = len(text_msg.split()) == 2 if txt_c: self._c_dob(user_id, text_msg.split()[1])", "self.vk_api.users.get(user_ids=user_id)[0]['first_name'] def _next(self, user_id, user_token, user_firstname) -> None: usr_search = UserSearchList(user_id, session) v_usr_cook", "color=VkKeyboardColor.SECONDARY) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _ask_relation_msg(self, peer_id) -> None: message = ('Ваше", "поиске\\n7 - влюблён/влюблена\\n8 - в гражданском браке\\n\\nпр. 
\"/re 6\"') self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None, random_id=get_random_id())", "положение указан неверно') return False def _search_users(self, u_id, user_token): usr_search = UserSearchList(u_id, session)", "s_engine = SearchEngine(u_id, user_token) if usr_search.check_users_existence() is None: usr = self.user_app.get_user(u_id) s_engine.search_users_n_add_to_db(age=dt.datetime.now().year -", "def _generate_user(self, u_id, name, usr_search_list, usr_cook, search_engine): if usr_search_list.check_users_existence() is None: self._send_msg(u_id, f'{name},", "{'dob': usr.dob, 'city': usr.city, 'gender': usr.gender, 'relation': usr.relation} if usr_info['dob'] is None: self._send_msg(u_id,", "usr_info = {'dob': usr.dob, 'city': usr.city, 'gender': usr.gender, 'relation': usr.relation} if usr_info['dob'] is", "'/gender 1 или /gender 2') elif text_msg.split()[0] == '/re': txt_c = len(text_msg.split()) ==", "для работы с ботом перейдите по кнопке снизу \"sign up 📝\" и '", "False def _c_gender(self, u_id, gender) -> bool: if gender.isdigit() and int(gender) in range(1,", "message) -> None: self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None, random_id=get_random_id()) def _resend(self, peer_id, value: str): message", "user['relation'] == 0: user['relation'] = None self.user_app.add_user(vk_id=user['id'], firstname=user['firstname'], lastname=user['lastname'], dob=user['dob'], gender=user['gender'], city=user['city'], relation=user['relation'])", "'gender': usr.gender, 'relation': usr.relation} if usr_info['dob'] is None: self._send_msg(u_id, 'Напишите дату рождения в", "get_id = self.userapp_token.get_last_searched_id(user_id) if get_id is not None: usr_search.move_user_to_archive(get_id) self.userapp_token.update_last_searched(user_id, random_id) else: self.userapp_token.update_last_searched(user_id,", "позже😓') return None r_usr = usr_search_list.select_random_row() attach = 
usr_cook.get_user_photos(r_usr.vk_usr_id) if len(attach) != 3:", "😔': self._send_bye(user_id, user_firstname) elif text_msg == 'а давай познакомимся 🐼': self._get_acquaintance(user_token) self._re_check(user_id, user_token)", "random_id=get_random_id()) def _unknown_command(self, peer_id, txt_msg) -> None: message = f\"неизвестная команда '{txt_msg}' 😞\\nнапишите", "txt_c = len(text_msg.split()) == 2 if txt_c: self._c_dob(user_id, text_msg.split()[1]) self._re_check(user_id, user_token) else: self._resend(user_id,", "s_engine) get_id = self.userapp_token.get_last_searched_id(user_id) if get_id is not None: usr_search.move_user_to_archive(get_id) self.userapp_token.update_last_searched(user_id, random_id) else:", "None: self._send_msg(u_id, 'Напишите дату рождения в формате: -> /dob D.M.YYYY (от 9 до", "🐼'} def __init__(self, vk_group_token, group_id, oauth_link): self.vk = vk_api.VkApi(token=vk_group_token) self.long_poll = VkBotLongPoll(self.vk, group_id=group_id)", "давай познакомимся 🐼'} def __init__(self, vk_group_token, group_id, oauth_link): self.vk = vk_api.VkApi(token=vk_group_token) self.long_poll =", "from {user_id}, text: {text_msg} \") if text_msg not in VKLaunchGroup.COMMANDS and \\ text_msg.split()[0]", "txt_c = len(text_msg.split()) == 2 if txt_c: self._c_relation(user_id, text_msg.split()[1]) self._re_check(user_id, user_token) else: self._resend(user_id,", "peer_id, firstname) -> None: message = f'{firstname}, мы все еще не знакомы... 
давай", "user_firstname = self._get_firstname(user_id) text_msg = event.obj['message']['text'].strip().lower() print(f\"New msg from {user_id}, text: {text_msg} \")", "not None: usr_search.move_user_to_black(get_id) self.userapp_token.update_last_searched(user_id, None) self._ask_to_move_msg(user_id) # Messaging def _send_msg(self, peer_id, message) ->", "'/re'}: self._unknown_command(user_id, text_msg) else: user_exist = self.userapp_token.check_user(user_id) user_token = self.userapp_token.get_user_token(user_id) step = self.userapp_token.get_step(user_id)", "user_id): return self.vk_api.users.get(user_ids=user_id)[0]['first_name'] def _next(self, user_id, user_token, user_firstname) -> None: usr_search = UserSearchList(user_id,", "3 if txt_c: self._c_city(user_id, text_msg.split()[1], text_msg.split()[2]) self._re_check(user_id, user_token) elif text_msg.split()[0] == '/gender': txt_c", "random_id) else: self.userapp_token.update_last_searched(user_id, random_id) self._ask_to_move_msg(user_id) def _move_to_fav(self, user_id) -> None: usr_search = UserSearchList(user_id,", "text_msg == 'доб. 
в чс': self._move_to_black(user_id) self._send_msg(user_id, 'пользователь добавлен в черный список 🌚\\n'", "self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None, random_id=get_random_id()) def _send_msg_sign_up(self, peer_id, usr_name) -> None: message = f'Хаю-Хай", "dt import flag from queue import Queue from vk_api.bot_longpoll import VkBotLongPoll, VkBotEventType from", "import VkBotLongPoll, VkBotEventType from vk_api.keyboard import VkKeyboard, VkKeyboardColor from vk_api.utils import get_random_id from", "from queue import Queue from vk_api.bot_longpoll import VkBotLongPoll, VkBotEventType from vk_api.keyboard import VkKeyboard,", "vk = vk_api.VkApi(token=self.userapp_token.get_user_token(u_id)) self.u_vk_api = vk.get_api() country_flag = flag.dflagize(f\"{country.strip()}\", subregions=True) country = self.u_vk_api.database.getCountries(code=country_flag)", "self._send_msg(user_id, 'пользователь добавлен в избраный список ⭐\\n' 'идет следующий поиск...️') self._next(user_id, user_token, user_firstname)", "{user_id}, text: {text_msg} \") if text_msg not in VKLaunchGroup.COMMANDS and \\ text_msg.split()[0] not", "u_id): usr = self.user_app.get_user(u_id) usr_info = {'dob': usr.dob, 'city': usr.city, 'gender': usr.gender, 'relation':", "__init__(self, vk_group_token, group_id, oauth_link): self.vk = vk_api.VkApi(token=vk_group_token) self.long_poll = VkBotLongPoll(self.vk, group_id=group_id) self.vk_api =", "= f'Неверный формат, правильный формат: {value}' self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None, random_id=get_random_id()) def _send_msg_sign_up(self, peer_id,", "'gender') return True self._send_msg(u_id, 'Неверный пол') return False def _c_relation(self, u_id, relation) ->", "user_firstname) elif text_msg == 'доб. 
в чс': self._move_to_black(user_id) self._send_msg(user_id, 'пользователь добавлен в черный", "= UserSearchList(user_id, session) get_id = self.userapp_token.get_last_searched_id(user_id) if user_id is not None: usr_search.move_user_to_favourite(get_id) self.userapp_token.update_last_searched(user_id,", "if 1 <= int(d) <= 31 and 1 <= int(m) <= 12 and", "VKLaunchGroup(VKGroupManage): def start(self): for event in self.long_poll.listen(): if event.type == VkBotEventType.MESSAGE_NEW: user_id =", "{'start', 'начать'} next_ = text_msg in {'next', 'свайп вправо'} if start and user_exist", "= len(text_msg.split()) == 2 if txt_c: self._c_dob(user_id, text_msg.split()[1]) self._re_check(user_id, user_token) else: self._resend(user_id, '/dob", "неверено') return False def _c_city(self, u_id, country, city) -> bool: vk = vk_api.VkApi(token=self.userapp_token.get_user_token(u_id))", "usr_name) -> None: message = f'{usr_name}, <NAME> вызвать меня сможете написав -> start", "daemon=True) VKGroupManage.QUEUE.put(t) while not VKGroupManage.QUEUE.empty(): VKGroupManage.QUEUE.get().start() return True return False def _c_dob(self, u_id,", "= flag.dflagize(f\"{country.strip()}\", subregions=True) country = self.u_vk_api.database.getCountries(code=country_flag) country_id = country['items'][0]['id'] if country_id != 0:", "usr_search_list, usr_cook, search_engine): if usr_search_list.check_users_existence() is None: self._send_msg(u_id, f'{name}, подходящих пользователей не найдено...", "после нажмите на зеленую кнопку \"start\" ' keyboard = VkKeyboard(one_time=False) keyboard.add_openlink_button(label='sign up 📝',", "keyboard.add_button('start', color=VkKeyboardColor.SECONDARY) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _ask_relation_msg(self, peer_id) -> None: message =", "user_id, user_token, user_firstname) -> None: usr_search = UserSearchList(user_id, session) v_usr_cook = VkUserCook(user_token) 
s_engine", "if '.' in answer: num = answer.split('.') if len(num) == 3: d, m,", "'dob') self._send_msg(u_id, 'я запомню вашу днюху ☺️') return True self._send_msg(u_id, 'Дата указана неверено')", "неверено') return False def _c_gender(self, u_id, gender) -> bool: if gender.isdigit() and int(gender)", "self.userapp_token.update_last_searched(user_id, random_id) else: self.userapp_token.update_last_searched(user_id, random_id) self._ask_to_move_msg(user_id) def _move_to_fav(self, user_id) -> None: usr_search =", "gender) -> bool: if gender.isdigit() and int(gender) in range(1, 3): self.user_app.update(u_id, int(gender), 'gender')", "for event in self.long_poll.listen(): if event.type == VkBotEventType.MESSAGE_NEW: user_id = event.obj['message']['peer_id'] user_firstname =", "True self._send_msg(u_id, 'Страна/город указан неверено') return False def _c_gender(self, u_id, gender) -> bool:", "group_id, oauth_link): self.vk = vk_api.VkApi(token=vk_group_token) self.long_poll = VkBotLongPoll(self.vk, group_id=group_id) self.vk_api = self.vk.get_api() self.userapp_token", "<= int(user['dob'].split('.')[2]) < 2014: user['dob'] = None if user['gender'] == 0: user['gender'] =", "работы с ботом перейдите по кнопке снизу \"sign up 📝\" и ' \\", "user_token, user_firstname) -> None: usr_search = UserSearchList(user_id, session) v_usr_cook = VkUserCook(user_token) s_engine =", "формате => (RU,UA,BY,UZ) или (🇷🇺🇺🇦🇧🇾🇺🇿) и напишите город' '\\nпр. 
\"/from 🇺🇦 Киев\" или", "text_msg.split()[0] == '/from': txt_c = len(text_msg.split()) == 3 if txt_c: self._c_city(user_id, text_msg.split()[1], text_msg.split()[2])", "if txt_c: self._c_dob(user_id, text_msg.split()[1]) self._re_check(user_id, user_token) else: self._resend(user_id, '/dob D.M.YYYY') elif text_msg.split()[0] ==", "return self._generate_user(u_id, name, usr_search_list, usr_cook, search_engine) self._send_msg(u_id, f'{name}, успешно нашли подходящего пользователей 🐼')", "if txt_c: self._c_relation(user_id, text_msg.split()[1]) self._re_check(user_id, user_token) else: self._resend(user_id, '/re 1-6') elif step ==", "'{txt_msg}' 😞\\nнапишите -> start\" keyboard = VkKeyboard(one_time=False) keyboard.add_button('start', color=VkKeyboardColor.SECONDARY) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id())", "-> None: usr_search = UserSearchList(user_id, session) get_id = self.userapp_token.get_last_searched_id(user_id) if user_id is not", "usr = self.user_app.get_user(u_id) usr_info = {'dob': usr.dob, 'city': usr.city, 'gender': usr.gender, 'relation': usr.relation}", "return False if usr_info['gender'] is None: self._send_msg(u_id, 'Ваш пол?\\n пр. \"/gender 1\" ->", "'доб. в чс', 'ну...давай позже 😔', 'а давай познакомимся 🐼'} def __init__(self, vk_group_token,", "UserAppToken(session) self.user_app = UserApp(session) self.oauth_link = oauth_link self.u_vk_api = None def _get_firstname(self, user_id):", "<= 2013: self.user_app.update(u_id, answer, 'dob') self._send_msg(u_id, 'я запомню вашу днюху ☺️') return True", "(RU,UA,BY,UZ) или (🇷🇺🇺🇦🇧🇾🇺🇿) и напишите город' '\\nпр. 
\"/from 🇺🇦 Киев\" или \"/from 🇷🇺", "bool: vk = vk_api.VkApi(token=self.userapp_token.get_user_token(u_id)) self.u_vk_api = vk.get_api() country_flag = flag.dflagize(f\"{country.strip()}\", subregions=True) country =", "keyboard=None, random_id=get_random_id()) def _resend(self, peer_id, value: str): message = f'Неверный формат, правильный формат:", "elif step == 0 and user_exist: if start: self._send_msg_signed_in(user_id, user_firstname) elif text_msg ==", "flag from queue import Queue from vk_api.bot_longpoll import VkBotLongPoll, VkBotEventType from vk_api.keyboard import", "is None: usr = self.user_app.get_user(u_id) s_engine.search_users_n_add_to_db(age=dt.datetime.now().year - usr.dob.year, gender=usr.gender, city=usr.city, relation=usr.relation) def _generate_user(self,", "self.userapp_token.check_user(user_id) user_token = self.userapp_token.get_user_token(user_id) step = self.userapp_token.get_step(user_id) start = text_msg in {'start', 'начать'}", "2 if txt_c: self._c_dob(user_id, text_msg.split()[1]) self._re_check(user_id, user_token) else: self._resend(user_id, '/dob D.M.YYYY') elif text_msg.split()[0]", "usr_search, v_usr_cook, s_engine) get_id = self.userapp_token.get_last_searched_id(user_id) if get_id is not None: usr_search.move_user_to_archive(get_id) self.userapp_token.update_last_searched(user_id,", "_check_new_usr_info(self, u_id): usr = self.user_app.get_user(u_id) usr_info = {'dob': usr.dob, 'city': usr.city, 'gender': usr.gender,", "user_token) if usr_search.check_users_existence() is None: usr = self.user_app.get_user(u_id) s_engine.search_users_n_add_to_db(age=dt.datetime.now().year - usr.dob.year, gender=usr.gender, city=usr.city,", "пользователей не найдено... 
вернитесь чуть позже😓') return None r_usr = usr_search_list.select_random_row() attach =", "keyboard.add_openlink_button(label='sign up 📝', link=self.oauth_link) keyboard.add_button('start', color=VkKeyboardColor.POSITIVE) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _send_msg_signed_in(self, peer_id,", "color=VkKeyboardColor.SECONDARY) keyboard.add_line() keyboard.add_button('доб. в избранное', color=VkKeyboardColor.POSITIVE) keyboard.add_line() keyboard.add_button('доб. в чс', color=VkKeyboardColor.NEGATIVE) self.vk_api.messages.send(peer_id=peer_id, message='✔️',", "start или по кнопке из меню чата' keyboard = VkKeyboard(one_time=True) keyboard.add_button('start', color=VkKeyboardColor.SECONDARY) self.vk_api.messages.send(peer_id=peer_id,", "0: user['gender'] = None if user['relation'] == 0: user['relation'] = None self.user_app.add_user(vk_id=user['id'], firstname=user['firstname'],", "VkKeyboard(one_time=False) keyboard.add_openlink_button(label='sign up 📝', link=self.oauth_link) keyboard.add_button('start', color=VkKeyboardColor.POSITIVE) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _send_msg_signed_in(self,", "self.userapp_token.update_step(u_id, 1) t = threading.Thread(target=self._search_users, args=(u_id, u_token), daemon=True) VKGroupManage.QUEUE.put(t) while not VKGroupManage.QUEUE.empty(): VKGroupManage.QUEUE.get().start()", "self._ask_to_move_msg(user_id) elif text_msg == 'доб. 
в избранное': self._move_to_fav(user_id) self._send_msg(user_id, 'пользователь добавлен в избраный", "пандой, чтобы перейти к поиску)' keyboard = VkKeyboard(one_time=True) keyboard.add_button('а давай познакомимся 🐼', color=VkKeyboardColor.POSITIVE)", "= VkKeyboard(one_time=False) keyboard.add_openlink_button(label='sign up 📝', link=self.oauth_link) keyboard.add_button('start', color=VkKeyboardColor.POSITIVE) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def", "семейное положение? Отправьте \"/re\" и цифру от 1 - 8\\n\\n1 - не женат/не", "чуть позже😓') return None r_usr = usr_search_list.select_random_row() attach = usr_cook.get_user_photos(r_usr.vk_usr_id) if len(attach) !=", "правильный формат: {value}' self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None, random_id=get_random_id()) def _send_msg_sign_up(self, peer_id, usr_name) -> None:", "self.long_poll.listen(): if event.type == VkBotEventType.MESSAGE_NEW: user_id = event.obj['message']['peer_id'] user_firstname = self._get_firstname(user_id) text_msg =", "from vk_api.keyboard import VkKeyboard, VkKeyboardColor from vk_api.utils import get_random_id from src.vk_chat_bot.db.database import UserAppToken,", "= vk_api.VkApi(token=self.userapp_token.get_user_token(u_id)) self.u_vk_api = vk.get_api() country_flag = flag.dflagize(f\"{country.strip()}\", subregions=True) country = self.u_vk_api.database.getCountries(code=country_flag) country_id", "self._re_check(user_id, user_token) elif text_msg.split()[0] == '/gender': txt_c = len(text_msg.split()) == 2 if txt_c:", "= {'start', 'начать', 'search', 'свайп вправо', 'доб. в избранное', 'доб. в чс', 'ну...давай", "False def _c_dob(self, u_id, answer) -> bool: if '.' 
in answer: num =", "= vk.get_api() country_flag = flag.dflagize(f\"{country.strip()}\", subregions=True) country = self.u_vk_api.database.getCountries(code=country_flag) country_id = country['items'][0]['id'] if", "next_: self._next(user_id, user_token, user_firstname) self._ask_to_move_msg(user_id) elif text_msg == 'доб. в избранное': self._move_to_fav(user_id) self._send_msg(user_id,", "= f'{firstname}, мы все еще не знакомы... давай познакомимся? 🐼\\n' \\ f'(нужно познакомится", "text_msg == 'а давай познакомимся 🐼': self._get_acquaintance(user_token) self._re_check(user_id, user_token) elif text_msg.split()[0] == '/dob':", "text_msg.split()[1]) self._re_check(user_id, user_token) else: self._resend(user_id, '/gender 1 или /gender 2') elif text_msg.split()[0] ==", "if country_id != 0: ci = self.u_vk_api.database.getCities(country_id=country_id, q=city, count=1)['items'] self.user_app.update(u_id, ci[0]['id'], 'city') self._send_msg(u_id,", "return True self._send_msg(u_id, 'Неверный пол') return False def _c_relation(self, u_id, relation) -> bool:", "self.userapp_token = UserAppToken(session) self.user_app = UserApp(session) self.oauth_link = oauth_link self.u_vk_api = None def", "None) self._ask_to_move_msg(user_id) # Messaging def _send_msg(self, peer_id, message) -> None: self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None,", "vk_api import datetime as dt import flag from queue import Queue from vk_api.bot_longpoll", "def _unknown_command(self, peer_id, txt_msg) -> None: message = f\"неизвестная команда '{txt_msg}' 😞\\nнапишите ->", "== 1 and user_exist: if start: self._send_msg(user_id, 'приветствую, нажмите на кнопки снизу 🐼')", "1942 <= int(y) <= 2013: self.user_app.update(u_id, answer, 'dob') self._send_msg(u_id, 'я запомню вашу днюху", "keyboard.add_line() keyboard.add_button('доб. 
в чс', color=VkKeyboardColor.NEGATIVE) self.vk_api.messages.send(peer_id=peer_id, message='✔️', keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _get_acquaintance(self, u_token): user", "'search', 'свайп вправо', 'доб. в избранное', 'доб. в чс', 'ну...давай позже 😔', 'а", "-> start\" keyboard = VkKeyboard(one_time=False) keyboard.add_button('start', color=VkKeyboardColor.SECONDARY) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _ask_relation_msg(self,", "user = VKinderUser(u_token).get_info() if user['dob'] is None or len(user['dob'].split('.')) != 3 or not", "12 and 1942 <= int(y) <= 2013: self.user_app.update(u_id, answer, 'dob') self._send_msg(u_id, 'я запомню", "self._generate_user(u_id, name, usr_search_list, usr_cook, search_engine) self._send_msg(u_id, f'{name}, успешно нашли подходящего пользователей 🐼') self.vk_api.messages.send(peer_id=u_id,", "is False: self._send_msg_sign_up(user_id, user_firstname) elif step == 0 and user_exist: if start: self._send_msg_signed_in(user_id,", "'доб. 
в чс': self._move_to_black(user_id) self._send_msg(user_id, 'пользователь добавлен в черный список 🌚\\n' 'идет следующий", "!= 3: usr_search_list.move_user_to_archive(r_usr.vk_usr_id) return self._generate_user(u_id, name, usr_search_list, usr_cook, search_engine) self._send_msg(u_id, f'{name}, успешно нашли", "вызвать меня сможете написав -> start или по кнопке из меню чата' keyboard", "🐼 после нажмите на зеленую кнопку \"start\" ' keyboard = VkKeyboard(one_time=False) keyboard.add_openlink_button(label='sign up", "range(1, 3): self.user_app.update(u_id, int(gender), 'gender') return True self._send_msg(u_id, 'Неверный пол') return False def", "= VkKeyboard(one_time=False) keyboard.add_button('start', color=VkKeyboardColor.SECONDARY) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _ask_relation_msg(self, peer_id) -> None:", "-> парень') return False if usr_info['relation'] is None: self._ask_relation_msg(u_id) return False self._ask_to_move_msg(u_id) return", "VkUserCook(user_token) s_engine = SearchEngine(user_id, user_token) random_id = self._generate_user(user_id, user_firstname, usr_search, v_usr_cook, s_engine) get_id", "в активном поиске\\n7 - влюблён/влюблена\\n8 - в гражданском браке\\n\\nпр. 
\"/re 6\"') self.vk_api.messages.send(peer_id=peer_id, message=message,", "bool: if gender.isdigit() and int(gender) in range(1, 3): self.user_app.update(u_id, int(gender), 'gender') return True", "answer, 'dob') self._send_msg(u_id, 'я запомню вашу днюху ☺️') return True self._send_msg(u_id, 'Дата указана", "user_token): usr_search = UserSearchList(u_id, session) s_engine = SearchEngine(u_id, user_token) if usr_search.check_users_existence() is None:", "def start(self): for event in self.long_poll.listen(): if event.type == VkBotEventType.MESSAGE_NEW: user_id = event.obj['message']['peer_id']", "= None if user['gender'] == 0: user['gender'] = None if user['relation'] == 0:", "if usr_search.check_users_existence() is None: usr = self.user_app.get_user(u_id) s_engine.search_users_n_add_to_db(age=dt.datetime.now().year - usr.dob.year, gender=usr.gender, city=usr.city, relation=usr.relation)", "False if usr_info['gender'] is None: self._send_msg(u_id, 'Ваш пол?\\n пр. \"/gender 1\" -> девушка,", "self.user_app.update(u_id, ci[0]['id'], 'city') self._send_msg(u_id, f'{country} {city} ☺️') return True self._send_msg(u_id, 'Страна/город указан неверено')", "else: user_exist = self.userapp_token.check_user(user_id) user_token = self.userapp_token.get_user_token(user_id) step = self.userapp_token.get_step(user_id) start = text_msg", "usr_info['city'] is None: self._send_msg(u_id, 'Откуда вы? 
в формате => (RU,UA,BY,UZ) или (🇷🇺🇺🇦🇧🇾🇺🇿) и", "D.M.YYYY') elif text_msg.split()[0] == '/from': txt_c = len(text_msg.split()) == 3 if txt_c: self._c_city(user_id,", "VkKeyboard(one_time=False) keyboard.add_button('start', color=VkKeyboardColor.SECONDARY) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _ask_relation_msg(self, peer_id) -> None: message", "len(text_msg.split()) == 2 if txt_c: self._c_gender(user_id, text_msg.split()[1]) self._re_check(user_id, user_token) else: self._resend(user_id, '/gender 1", "self._send_msg(u_id, 'Ваш пол?\\n пр. \"/gender 1\" -> девушка, \"/gender 2\" -> парень') return", "get_id is not None: usr_search.move_user_to_archive(get_id) self.userapp_token.update_last_searched(user_id, random_id) else: self.userapp_token.update_last_searched(user_id, random_id) self._ask_to_move_msg(user_id) def _move_to_fav(self,", "usr_search = UserSearchList(user_id, session) v_usr_cook = VkUserCook(user_token) s_engine = SearchEngine(user_id, user_token) random_id =", "u_id, relation) -> bool: if relation.isdigit() and int(relation) in range(1, 9): self.user_app.update(u_id, int(relation),", "<gh_stars>0 import threading import vk_api import datetime as dt import flag from queue", "vk_api.VkApi(token=self.userapp_token.get_user_token(u_id)) self.u_vk_api = vk.get_api() country_flag = flag.dflagize(f\"{country.strip()}\", subregions=True) country = self.u_vk_api.database.getCountries(code=country_flag) country_id =", "= self.userapp_token.check_user(user_id) user_token = self.userapp_token.get_user_token(user_id) step = self.userapp_token.get_step(user_id) start = text_msg in {'start',", "text_msg.split()[0] == '/dob': txt_c = len(text_msg.split()) == 2 if txt_c: self._c_dob(user_id, text_msg.split()[1]) self._re_check(user_id,", "0: ci = self.u_vk_api.database.getCities(country_id=country_id, q=city, count=1)['items'] self.user_app.update(u_id, ci[0]['id'], 'city') 
self._send_msg(u_id, f'{country} {city} ☺️')", "f'{name}, подходящих пользователей не найдено... вернитесь чуть позже😓') return None r_usr = usr_search_list.select_random_row()", "u_id, user_token): usr_search = UserSearchList(u_id, session) s_engine = SearchEngine(u_id, user_token) if usr_search.check_users_existence() is", "int(m) <= 12 and 1942 <= int(y) <= 2013: self.user_app.update(u_id, answer, 'dob') self._send_msg(u_id,", "def _check_new_usr_info(self, u_id): usr = self.user_app.get_user(u_id) usr_info = {'dob': usr.dob, 'city': usr.city, 'gender':", "чс', color=VkKeyboardColor.NEGATIVE) self.vk_api.messages.send(peer_id=peer_id, message='✔️', keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _get_acquaintance(self, u_token): user = VKinderUser(u_token).get_info() if", "and 1 <= int(m) <= 12 and 1942 <= int(y) <= 2013: self.user_app.update(u_id,", "print(f\"New msg from {user_id}, text: {text_msg} \") if text_msg not in VKLaunchGroup.COMMANDS and", "self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _unknown_command(self, peer_id, txt_msg) -> None: message = f\"неизвестная", "return True self._send_msg(u_id, 'Страна/город указан неверено') return False def _c_gender(self, u_id, gender) ->", "self._send_msg(u_id, 'Откуда вы? в формате => (RU,UA,BY,UZ) или (🇷🇺🇺🇦🇧🇾🇺🇿) и напишите город' '\\nпр.", "== 'доб. 
в чс': self._move_to_black(user_id) self._send_msg(user_id, 'пользователь добавлен в черный список 🌚\\n' 'идет", "== VkBotEventType.MESSAGE_NEW: user_id = event.obj['message']['peer_id'] user_firstname = self._get_firstname(user_id) text_msg = event.obj['message']['text'].strip().lower() print(f\"New msg", "if user_id is not None: usr_search.move_user_to_black(get_id) self.userapp_token.update_last_searched(user_id, None) self._ask_to_move_msg(user_id) # Messaging def _send_msg(self,", "8\\n\\n1 - не женат/не замужем\\n' '2 - есть друг/есть подруга\\n3 - помолвлен/помолвлена\\n4 -", "UserSearchList(user_id, session) get_id = self.userapp_token.get_last_searched_id(user_id) if user_id is not None: usr_search.move_user_to_black(get_id) self.userapp_token.update_last_searched(user_id, None)", "-> None: usr_search = UserSearchList(user_id, session) v_usr_cook = VkUserCook(user_token) s_engine = SearchEngine(user_id, user_token)", "def _resend(self, peer_id, value: str): message = f'Неверный формат, правильный формат: {value}' self.vk_api.messages.send(peer_id=peer_id,", "message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _send_msg_signed_in(self, peer_id, firstname) -> None: message = f'{firstname}, мы", "user_token) else: self._resend(user_id, '/gender 1 или /gender 2') elif text_msg.split()[0] == '/re': txt_c", "self._ask_to_move_msg(user_id) def _move_to_black(self, user_id) -> None: usr_search = UserSearchList(user_id, session) get_id = self.userapp_token.get_last_searched_id(user_id)", "вправо', 'доб. в избранное', 'доб. в чс', 'ну...давай позже 😔', 'а давай познакомимся", "user_token, user_firstname) self._ask_to_move_msg(user_id) elif text_msg == 'доб. 
в избранное': self._move_to_fav(user_id) self._send_msg(user_id, 'пользователь добавлен", "random_id=get_random_id()) def _resend(self, peer_id, value: str): message = f'Неверный формат, правильный формат: {value}'", "if usr_info['gender'] is None: self._send_msg(u_id, 'Ваш пол?\\n пр. \"/gender 1\" -> девушка, \"/gender", "import SearchEngine, VKinderUser, VkUserCook class VKGroupManage: QUEUE = Queue() COMMANDS = {'start', 'начать',", "message = f'Хаю-Хай 🐍 {usr_name}, для работы с ботом перейдите по кнопке снизу", "u_id, u_token): if self._check_new_usr_info(u_id): self.userapp_token.update_step(u_id, 1) t = threading.Thread(target=self._search_users, args=(u_id, u_token), daemon=True) VKGroupManage.QUEUE.put(t)", "random_id=get_random_id()) def _ask_to_move_msg(self, peer_id) -> None: keyboard = VkKeyboard(one_time=True) keyboard.add_button('свайп вправо', color=VkKeyboardColor.SECONDARY) keyboard.add_line()", "q=city, count=1)['items'] self.user_app.update(u_id, ci[0]['id'], 'city') self._send_msg(u_id, f'{country} {city} ☺️') return True self._send_msg(u_id, 'Страна/город", "start = text_msg in {'start', 'начать'} next_ = text_msg in {'next', 'свайп вправо'}", "value: str): message = f'Неверный формат, правильный формат: {value}' self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None, random_id=get_random_id())", "text_msg.split()[1]) self._re_check(user_id, user_token) else: self._resend(user_id, '/re 1-6') elif step == 1 and user_exist:", "random_id=get_random_id()) def _get_acquaintance(self, u_token): user = VKinderUser(u_token).get_info() if user['dob'] is None or len(user['dob'].split('.'))", "= self._get_firstname(user_id) text_msg = event.obj['message']['text'].strip().lower() print(f\"New msg from {user_id}, text: {text_msg} \") if", "user_id is not None: usr_search.move_user_to_black(get_id) self.userapp_token.update_last_searched(user_id, None) self._ask_to_move_msg(user_id) # Messaging def _send_msg(self, peer_id,", "up 📝', 
link=self.oauth_link) keyboard.add_button('start', color=VkKeyboardColor.POSITIVE) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _send_msg_signed_in(self, peer_id, firstname)", "!= 0: ci = self.u_vk_api.database.getCities(country_id=country_id, q=city, count=1)['items'] self.user_app.update(u_id, ci[0]['id'], 'city') self._send_msg(u_id, f'{country} {city}", "random_id=get_random_id()) def _send_msg_signed_in(self, peer_id, firstname) -> None: message = f'{firstname}, мы все еще", "= f'{usr_name}, <NAME> вызвать меня сможете написав -> start или по кнопке из", "d, m, y = num[0], num[1], num[2] if d.isdigit() and m.isdigit() and y.isdigit():", "else: self._resend(user_id, '/re 1-6') elif step == 1 and user_exist: if start: self._send_msg(user_id,", "VkBotEventType.MESSAGE_NEW: user_id = event.obj['message']['peer_id'] user_firstname = self._get_firstname(user_id) text_msg = event.obj['message']['text'].strip().lower() print(f\"New msg from", "event in self.long_poll.listen(): if event.type == VkBotEventType.MESSAGE_NEW: user_id = event.obj['message']['peer_id'] user_firstname = self._get_firstname(user_id)", "3: d, m, y = num[0], num[1], num[2] if d.isdigit() and m.isdigit() and", "message=message, keyboard=None, random_id=get_random_id()) def _send_msg_sign_up(self, peer_id, usr_name) -> None: message = f'Хаю-Хай 🐍", "VKGroupManage.QUEUE.get().start() return True return False def _c_dob(self, u_id, answer) -> bool: if '.'", "'/gender', '/re'}: self._unknown_command(user_id, text_msg) else: user_exist = self.userapp_token.check_user(user_id) user_token = self.userapp_token.get_user_token(user_id) step =", "user['gender'] == 0: user['gender'] = None if user['relation'] == 0: user['relation'] = None", "- женат/замужем\\n5 - всё сложно\\n' '6 - в активном поиске\\n7 - влюблён/влюблена\\n8 -", "пол') return False def _c_relation(self, u_id, relation) -> bool: if relation.isdigit() and 
int(relation)", "_send_bye(self, peer_id, usr_name) -> None: message = f'{usr_name}, <NAME> вызвать меня сможете написав", "'/dob D.M.YYYY') elif text_msg.split()[0] == '/from': txt_c = len(text_msg.split()) == 3 if txt_c:", "= usr_cook.get_user_photos(r_usr.vk_usr_id) if len(attach) != 3: usr_search_list.move_user_to_archive(r_usr.vk_usr_id) return self._generate_user(u_id, name, usr_search_list, usr_cook, search_engine)", "random_id=get_random_id()) def _ask_relation_msg(self, peer_id) -> None: message = ('Ваше семейное положение? Отправьте \"/re\"", "'/from': txt_c = len(text_msg.split()) == 3 if txt_c: self._c_city(user_id, text_msg.split()[1], text_msg.split()[2]) self._re_check(user_id, user_token)", "if self._check_new_usr_info(u_id): self.userapp_token.update_step(u_id, 1) t = threading.Thread(target=self._search_users, args=(u_id, u_token), daemon=True) VKGroupManage.QUEUE.put(t) while not", "<= 12 and 1942 <= int(y) <= 2013: self.user_app.update(u_id, answer, 'dob') self._send_msg(u_id, 'я", "self._send_msg(u_id, f'{country} {city} ☺️') return True self._send_msg(u_id, 'Страна/город указан неверено') return False def", "Киев\" или \"/from 🇷🇺 Москва\" или \"/from BY Минск\"') return False if usr_info['gender']", "\") if text_msg not in VKLaunchGroup.COMMANDS and \\ text_msg.split()[0] not in {'/dob', '/from',", "self._check_new_usr_info(u_id): self.userapp_token.update_step(u_id, 1) t = threading.Thread(target=self._search_users, args=(u_id, u_token), daemon=True) VKGroupManage.QUEUE.put(t) while not VKGroupManage.QUEUE.empty():", "('Ваше семейное положение? Отправьте \"/re\" и цифру от 1 - 8\\n\\n1 - не", "up 📝\" и ' \\ f'выдайте необходимые права 🐼 после нажмите на зеленую", "peer_id) -> None: message = ('Ваше семейное положение? 
Отправьте \"/re\" и цифру от", "attach = usr_cook.get_user_photos(r_usr.vk_usr_id) if len(attach) != 3: usr_search_list.move_user_to_archive(r_usr.vk_usr_id) return self._generate_user(u_id, name, usr_search_list, usr_cook,", "избранное', 'доб. в чс', 'ну...давай позже 😔', 'а давай познакомимся 🐼'} def __init__(self,", "color=VkKeyboardColor.NEGATIVE) self.vk_api.messages.send(peer_id=peer_id, message='✔️', keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _get_acquaintance(self, u_token): user = VKinderUser(u_token).get_info() if user['dob']", "src.vk_chat_bot.db.database import UserAppToken, UserSearchList, UserApp, session from src.vk_chat_bot.vk.vkontakte import SearchEngine, VKinderUser, VkUserCook class", "UserApp(session) self.oauth_link = oauth_link self.u_vk_api = None def _get_firstname(self, user_id): return self.vk_api.users.get(user_ids=user_id)[0]['first_name'] def", "usr_info['relation'] is None: self._ask_relation_msg(u_id) return False self._ask_to_move_msg(u_id) return True def _re_check(self, u_id, u_token):", "in {'start', 'начать'} next_ = text_msg in {'next', 'свайп вправо'} if start and", "self.user_app.add_user(vk_id=user['id'], firstname=user['firstname'], lastname=user['lastname'], dob=user['dob'], gender=user['gender'], city=user['city'], relation=user['relation']) return True def _check_new_usr_info(self, u_id): usr", "u_id, gender) -> bool: if gender.isdigit() and int(gender) in range(1, 3): self.user_app.update(u_id, int(gender),", "1 или /gender 2') elif text_msg.split()[0] == '/re': txt_c = len(text_msg.split()) == 2", "None) self._ask_to_move_msg(user_id) def _move_to_black(self, user_id) -> None: usr_search = UserSearchList(user_id, session) get_id =", "по кнопке из меню чата' keyboard = VkKeyboard(one_time=True) keyboard.add_button('start', color=VkKeyboardColor.SECONDARY) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(),", "is None or len(user['dob'].split('.')) != 3 or 
not 1942 <= int(user['dob'].split('.')[2]) < 2014:", "31 and 1 <= int(m) <= 12 and 1942 <= int(y) <= 2013:", "= UserApp(session) self.oauth_link = oauth_link self.u_vk_api = None def _get_firstname(self, user_id): return self.vk_api.users.get(user_ids=user_id)[0]['first_name']", "😞\\nнапишите -> start\" keyboard = VkKeyboard(one_time=False) keyboard.add_button('start', color=VkKeyboardColor.SECONDARY) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def", "'Дата указана неверено') return False def _c_city(self, u_id, country, city) -> bool: vk", "if user['dob'] is None or len(user['dob'].split('.')) != 3 or not 1942 <= int(user['dob'].split('.')[2])", "self._c_relation(user_id, text_msg.split()[1]) self._re_check(user_id, user_token) else: self._resend(user_id, '/re 1-6') elif step == 1 and", "color=VkKeyboardColor.POSITIVE) keyboard.add_line() keyboard.add_button('ну...давай позже 😔', color=VkKeyboardColor.NEGATIVE) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _send_bye(self, peer_id,", "-> None: message = f\"неизвестная команда '{txt_msg}' 😞\\nнапишите -> start\" keyboard = VkKeyboard(one_time=False)", "and user_exist: if start: self._send_msg_signed_in(user_id, user_firstname) elif text_msg == 'ну...давай позже 😔': self._send_bye(user_id,", "vk.get_api() country_flag = flag.dflagize(f\"{country.strip()}\", subregions=True) country = self.u_vk_api.database.getCountries(code=country_flag) country_id = country['items'][0]['id'] if country_id", "= {'dob': usr.dob, 'city': usr.city, 'gender': usr.gender, 'relation': usr.relation} if usr_info['dob'] is None:", "return None r_usr = usr_search_list.select_random_row() attach = usr_cook.get_user_photos(r_usr.vk_usr_id) if len(attach) != 3: usr_search_list.move_user_to_archive(r_usr.vk_usr_id)", "keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def 
_unknown_command(self, peer_id, txt_msg) -> None: message = f\"неизвестная команда '{txt_msg}'", "= self._generate_user(user_id, user_firstname, usr_search, v_usr_cook, s_engine) get_id = self.userapp_token.get_last_searched_id(user_id) if get_id is not", "f\"неизвестная команда '{txt_msg}' 😞\\nнапишите -> start\" keyboard = VkKeyboard(one_time=False) keyboard.add_button('start', color=VkKeyboardColor.SECONDARY) self.vk_api.messages.send(peer_id=peer_id, message=message,", "_c_dob(self, u_id, answer) -> bool: if '.' in answer: num = answer.split('.') if", "get_id = self.userapp_token.get_last_searched_id(user_id) if user_id is not None: usr_search.move_user_to_black(get_id) self.userapp_token.update_last_searched(user_id, None) self._ask_to_move_msg(user_id) #", "= VKinderUser(u_token).get_info() if user['dob'] is None or len(user['dob'].split('.')) != 3 or not 1942", "False def _c_city(self, u_id, country, city) -> bool: vk = vk_api.VkApi(token=self.userapp_token.get_user_token(u_id)) self.u_vk_api =", "src.vk_chat_bot.vk.vkontakte import SearchEngine, VKinderUser, VkUserCook class VKGroupManage: QUEUE = Queue() COMMANDS = {'start',", "self._send_msg(u_id, 'Страна/город указан неверено') return False def _c_gender(self, u_id, gender) -> bool: if", "elif step == 1 and user_exist: if start: self._send_msg(user_id, 'приветствую, нажмите на кнопки", "lastname=user['lastname'], dob=user['dob'], gender=user['gender'], city=user['city'], relation=user['relation']) return True def _check_new_usr_info(self, u_id): usr = self.user_app.get_user(u_id)", "self.userapp_token.get_user_token(user_id) step = self.userapp_token.get_step(user_id) start = text_msg in {'start', 'начать'} next_ = text_msg", "формат, правильный формат: {value}' self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None, random_id=get_random_id()) def _send_msg_sign_up(self, peer_id, usr_name) ->", "SearchEngine, VKinderUser, VkUserCook class VKGroupManage: QUEUE = Queue() COMMANDS = 
{'start', 'начать', 'search',", "int(y) <= 2013: self.user_app.update(u_id, answer, 'dob') self._send_msg(u_id, 'я запомню вашу днюху ☺️') return", "user_token) elif text_msg.split()[0] == '/dob': txt_c = len(text_msg.split()) == 2 if txt_c: self._c_dob(user_id,", "self.user_app = UserApp(session) self.oauth_link = oauth_link self.u_vk_api = None def _get_firstname(self, user_id): return", "в чс': self._move_to_black(user_id) self._send_msg(user_id, 'пользователь добавлен в черный список 🌚\\n' 'идет следующий поиск...')", "None r_usr = usr_search_list.select_random_row() attach = usr_cook.get_user_photos(r_usr.vk_usr_id) if len(attach) != 3: usr_search_list.move_user_to_archive(r_usr.vk_usr_id) return", "self._ask_to_move_msg(user_id) def _move_to_fav(self, user_id) -> None: usr_search = UserSearchList(user_id, session) get_id = self.userapp_token.get_last_searched_id(user_id)", "bool: if '.' in answer: num = answer.split('.') if len(num) == 3: d,", "import Queue from vk_api.bot_longpoll import VkBotLongPoll, VkBotEventType from vk_api.keyboard import VkKeyboard, VkKeyboardColor from", "firstname=user['firstname'], lastname=user['lastname'], dob=user['dob'], gender=user['gender'], city=user['city'], relation=user['relation']) return True def _check_new_usr_info(self, u_id): usr =", "давай познакомимся 🐼', color=VkKeyboardColor.POSITIVE) keyboard.add_line() keyboard.add_button('ну...давай позже 😔', color=VkKeyboardColor.NEGATIVE) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id())", "if user['gender'] == 0: user['gender'] = None if user['relation'] == 0: user['relation'] =", "None: self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None, random_id=get_random_id()) def _resend(self, peer_id, value: str): message = f'Неверный", "None: message = f\"неизвестная команда '{txt_msg}' 😞\\nнапишите -> start\" keyboard = VkKeyboard(one_time=False) keyboard.add_button('start',", "bool: if 
relation.isdigit() and int(relation) in range(1, 9): self.user_app.update(u_id, int(relation), 'relation') return True", "1942 <= int(user['dob'].split('.')[2]) < 2014: user['dob'] = None if user['gender'] == 0: user['gender']", "return True return False def _c_dob(self, u_id, answer) -> bool: if '.' in", "'ну...давай позже 😔': self._send_bye(user_id, user_firstname) elif text_msg == 'а давай познакомимся 🐼': self._get_acquaintance(user_token)", "VKGroupManage: QUEUE = Queue() COMMANDS = {'start', 'начать', 'search', 'свайп вправо', 'доб. в", "= len(text_msg.split()) == 2 if txt_c: self._c_relation(user_id, text_msg.split()[1]) self._re_check(user_id, user_token) else: self._resend(user_id, '/re", "import VkKeyboard, VkKeyboardColor from vk_api.utils import get_random_id from src.vk_chat_bot.db.database import UserAppToken, UserSearchList, UserApp,", "search_engine): if usr_search_list.check_users_existence() is None: self._send_msg(u_id, f'{name}, подходящих пользователей не найдено... вернитесь чуть", "def _c_city(self, u_id, country, city) -> bool: vk = vk_api.VkApi(token=self.userapp_token.get_user_token(u_id)) self.u_vk_api = vk.get_api()", "= SearchEngine(u_id, user_token) if usr_search.check_users_existence() is None: usr = self.user_app.get_user(u_id) s_engine.search_users_n_add_to_db(age=dt.datetime.now().year - usr.dob.year,", "-> None: message = f'{firstname}, мы все еще не знакомы... давай познакомимся? 🐼\\n'", "keyboard = VkKeyboard(one_time=True) keyboard.add_button('свайп вправо', color=VkKeyboardColor.SECONDARY) keyboard.add_line() keyboard.add_button('доб. 
в избранное', color=VkKeyboardColor.POSITIVE) keyboard.add_line() keyboard.add_button('доб.", "формат: {value}' self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None, random_id=get_random_id()) def _send_msg_sign_up(self, peer_id, usr_name) -> None: message", "self._c_city(user_id, text_msg.split()[1], text_msg.split()[2]) self._re_check(user_id, user_token) elif text_msg.split()[0] == '/gender': txt_c = len(text_msg.split()) ==", "if txt_c: self._c_gender(user_id, text_msg.split()[1]) self._re_check(user_id, user_token) else: self._resend(user_id, '/gender 1 или /gender 2')", "= f'Хаю-Хай 🐍 {usr_name}, для работы с ботом перейдите по кнопке снизу \"sign", "'Семейное положение указан неверно') return False def _search_users(self, u_id, user_token): usr_search = UserSearchList(u_id,", "self.user_app.get_user(u_id) s_engine.search_users_n_add_to_db(age=dt.datetime.now().year - usr.dob.year, gender=usr.gender, city=usr.city, relation=usr.relation) def _generate_user(self, u_id, name, usr_search_list, usr_cook,", "keyboard.add_button('ну...давай позже 😔', color=VkKeyboardColor.NEGATIVE) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _send_bye(self, peer_id, usr_name) ->", "self._next(user_id, user_token, user_firstname) elif text_msg == 'доб. в чс': self._move_to_black(user_id) self._send_msg(user_id, 'пользователь добавлен", "user['dob'] = None if user['gender'] == 0: user['gender'] = None if user['relation'] ==", "браке\\n\\nпр. 
\"/re 6\"') self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None, random_id=get_random_id()) def _ask_to_move_msg(self, peer_id) -> None: keyboard", "answer.split('.') if len(num) == 3: d, m, y = num[0], num[1], num[2] if", "usr_search = UserSearchList(u_id, session) s_engine = SearchEngine(u_id, user_token) if usr_search.check_users_existence() is None: usr", "import flag from queue import Queue from vk_api.bot_longpoll import VkBotLongPoll, VkBotEventType from vk_api.keyboard", "elif next_: self._next(user_id, user_token, user_firstname) self._ask_to_move_msg(user_id) elif text_msg == 'доб. в избранное': self._move_to_fav(user_id)", "найдено... вернитесь чуть позже😓') return None r_usr = usr_search_list.select_random_row() attach = usr_cook.get_user_photos(r_usr.vk_usr_id) if", "1\" -> девушка, \"/gender 2\" -> парень') return False if usr_info['relation'] is None:", "message = f'{firstname}, мы все еще не знакомы... давай познакомимся? 🐼\\n' \\ f'(нужно", "def _re_check(self, u_id, u_token): if self._check_new_usr_info(u_id): self.userapp_token.update_step(u_id, 1) t = threading.Thread(target=self._search_users, args=(u_id, u_token),", "return True def _re_check(self, u_id, u_token): if self._check_new_usr_info(u_id): self.userapp_token.update_step(u_id, 1) t = threading.Thread(target=self._search_users,", "None: message = ('Ваше семейное положение? Отправьте \"/re\" и цифру от 1 -", "True return False def _c_dob(self, u_id, answer) -> bool: if '.' 
in answer:", "and m.isdigit() and y.isdigit(): if 1 <= int(d) <= 31 and 1 <=", "'а давай познакомимся 🐼'} def __init__(self, vk_group_token, group_id, oauth_link): self.vk = vk_api.VkApi(token=vk_group_token) self.long_poll", "\"/from BY Минск\"') return False if usr_info['gender'] is None: self._send_msg(u_id, 'Ваш пол?\\n пр.", "def _next(self, user_id, user_token, user_firstname) -> None: usr_search = UserSearchList(user_id, session) v_usr_cook =", "self.vk = vk_api.VkApi(token=vk_group_token) self.long_poll = VkBotLongPoll(self.vk, group_id=group_id) self.vk_api = self.vk.get_api() self.userapp_token = UserAppToken(session)", "threading import vk_api import datetime as dt import flag from queue import Queue", "is None: self._send_msg(u_id, 'Откуда вы? в формате => (RU,UA,BY,UZ) или (🇷🇺🇺🇦🇧🇾🇺🇿) и напишите", "== '/from': txt_c = len(text_msg.split()) == 3 if txt_c: self._c_city(user_id, text_msg.split()[1], text_msg.split()[2]) self._re_check(user_id,", "import UserAppToken, UserSearchList, UserApp, session from src.vk_chat_bot.vk.vkontakte import SearchEngine, VKinderUser, VkUserCook class VKGroupManage:", "_c_city(self, u_id, country, city) -> bool: vk = vk_api.VkApi(token=self.userapp_token.get_user_token(u_id)) self.u_vk_api = vk.get_api() country_flag", "активном поиске\\n7 - влюблён/влюблена\\n8 - в гражданском браке\\n\\nпр. \"/re 6\"') self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None,", "= VkKeyboard(one_time=True) keyboard.add_button('свайп вправо', color=VkKeyboardColor.SECONDARY) keyboard.add_line() keyboard.add_button('доб. в избранное', color=VkKeyboardColor.POSITIVE) keyboard.add_line() keyboard.add_button('доб. 
в", "len(text_msg.split()) == 3 if txt_c: self._c_city(user_id, text_msg.split()[1], text_msg.split()[2]) self._re_check(user_id, user_token) elif text_msg.split()[0] ==", "message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _send_bye(self, peer_id, usr_name) -> None: message = f'{usr_name}, <NAME>", "'начать', 'search', 'свайп вправо', 'доб. в избранное', 'доб. в чс', 'ну...давай позже 😔',", "SearchEngine(user_id, user_token) random_id = self._generate_user(user_id, user_firstname, usr_search, v_usr_cook, s_engine) get_id = self.userapp_token.get_last_searched_id(user_id) if", "message = ('Ваше семейное положение? Отправьте \"/re\" и цифру от 1 - 8\\n\\n1", "== '/dob': txt_c = len(text_msg.split()) == 2 if txt_c: self._c_dob(user_id, text_msg.split()[1]) self._re_check(user_id, user_token)", "and \\ text_msg.split()[0] not in {'/dob', '/from', '/gender', '/re'}: self._unknown_command(user_id, text_msg) else: user_exist", "давай познакомимся 🐼': self._get_acquaintance(user_token) self._re_check(user_id, user_token) elif text_msg.split()[0] == '/dob': txt_c = len(text_msg.split())", "int(relation), 'relation') return True self._send_msg(u_id, 'Семейное положение указан неверно') return False def _search_users(self,", "== 2 if txt_c: self._c_relation(user_id, text_msg.split()[1]) self._re_check(user_id, user_token) else: self._resend(user_id, '/re 1-6') elif", "if d.isdigit() and m.isdigit() and y.isdigit(): if 1 <= int(d) <= 31 and", "return False def _c_relation(self, u_id, relation) -> bool: if relation.isdigit() and int(relation) in", "def _get_firstname(self, user_id): return self.vk_api.users.get(user_ids=user_id)[0]['first_name'] def _next(self, user_id, user_token, user_firstname) -> None: usr_search", "random_id=get_random_id()) def _send_bye(self, peer_id, usr_name) -> None: message = f'{usr_name}, <NAME> вызвать меня", "'пользователь добавлен в черный список 🌚\\n' 'идет следующий поиск...') self._next(user_id, user_token, 
user_firstname) if", "все еще не знакомы... давай познакомимся? 🐼\\n' \\ f'(нужно познакомится с пандой, чтобы", "session) s_engine = SearchEngine(u_id, user_token) if usr_search.check_users_existence() is None: usr = self.user_app.get_user(u_id) s_engine.search_users_n_add_to_db(age=dt.datetime.now().year", "_send_msg_signed_in(self, peer_id, firstname) -> None: message = f'{firstname}, мы все еще не знакомы...", "message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _unknown_command(self, peer_id, txt_msg) -> None: message = f\"неизвестная команда", "user_token = self.userapp_token.get_user_token(user_id) step = self.userapp_token.get_step(user_id) start = text_msg in {'start', 'начать'} next_", "gender=user['gender'], city=user['city'], relation=user['relation']) return True def _check_new_usr_info(self, u_id): usr = self.user_app.get_user(u_id) usr_info =", "снизу \"sign up 📝\" и ' \\ f'выдайте необходимые права 🐼 после нажмите", "⭐\\n' 'идет следующий поиск...️') self._next(user_id, user_token, user_firstname) elif text_msg == 'доб. 
в чс':", "☺️') return True self._send_msg(u_id, 'Страна/город указан неверено') return False def _c_gender(self, u_id, gender)", "text_msg = event.obj['message']['text'].strip().lower() print(f\"New msg from {user_id}, text: {text_msg} \") if text_msg not", "{'next', 'свайп вправо'} if start and user_exist is False: self._send_msg_sign_up(user_id, user_firstname) elif step", "get_id = self.userapp_token.get_last_searched_id(user_id) if user_id is not None: usr_search.move_user_to_favourite(get_id) self.userapp_token.update_last_searched(user_id, None) self._ask_to_move_msg(user_id) def", "city) -> bool: vk = vk_api.VkApi(token=self.userapp_token.get_user_token(u_id)) self.u_vk_api = vk.get_api() country_flag = flag.dflagize(f\"{country.strip()}\", subregions=True)", "str): message = f'Неверный формат, правильный формат: {value}' self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None, random_id=get_random_id()) def", "elif text_msg == 'а давай познакомимся 🐼': self._get_acquaintance(user_token) self._re_check(user_id, user_token) elif text_msg.split()[0] ==", "'/re 1-6') elif step == 1 and user_exist: if start: self._send_msg(user_id, 'приветствую, нажмите", "знакомы... давай познакомимся? 
🐼\\n' \\ f'(нужно познакомится с пандой, чтобы перейти к поиску)'", "vk_api.VkApi(token=vk_group_token) self.long_poll = VkBotLongPoll(self.vk, group_id=group_id) self.vk_api = self.vk.get_api() self.userapp_token = UserAppToken(session) self.user_app =", "9): self.user_app.update(u_id, int(relation), 'relation') return True self._send_msg(u_id, 'Семейное положение указан неверно') return False", "= threading.Thread(target=self._search_users, args=(u_id, u_token), daemon=True) VKGroupManage.QUEUE.put(t) while not VKGroupManage.QUEUE.empty(): VKGroupManage.QUEUE.get().start() return True return", "m, y = num[0], num[1], num[2] if d.isdigit() and m.isdigit() and y.isdigit(): if", "📝', link=self.oauth_link) keyboard.add_button('start', color=VkKeyboardColor.POSITIVE) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _send_msg_signed_in(self, peer_id, firstname) ->", "txt_c = len(text_msg.split()) == 2 if txt_c: self._c_gender(user_id, text_msg.split()[1]) self._re_check(user_id, user_token) else: self._resend(user_id,", "😔', 'а давай познакомимся 🐼'} def __init__(self, vk_group_token, group_id, oauth_link): self.vk = vk_api.VkApi(token=vk_group_token)", "self._c_dob(user_id, text_msg.split()[1]) self._re_check(user_id, user_token) else: self._resend(user_id, '/dob D.M.YYYY') elif text_msg.split()[0] == '/from': txt_c", "False if usr_info['relation'] is None: self._ask_relation_msg(u_id) return False self._ask_to_move_msg(u_id) return True def _re_check(self,", "COMMANDS = {'start', 'начать', 'search', 'свайп вправо', 'доб. в избранное', 'доб. 
в чс',", "self.userapp_token.update_last_searched(user_id, random_id) self._ask_to_move_msg(user_id) def _move_to_fav(self, user_id) -> None: usr_search = UserSearchList(user_id, session) get_id", "_ask_to_move_msg(self, peer_id) -> None: keyboard = VkKeyboard(one_time=True) keyboard.add_button('свайп вправо', color=VkKeyboardColor.SECONDARY) keyboard.add_line() keyboard.add_button('доб. в", "False def _c_relation(self, u_id, relation) -> bool: if relation.isdigit() and int(relation) in range(1,", "self._next(user_id, user_token, user_firstname) self._ask_to_move_msg(user_id) elif text_msg == 'доб. в избранное': self._move_to_fav(user_id) self._send_msg(user_id, 'пользователь", "def _move_to_fav(self, user_id) -> None: usr_search = UserSearchList(user_id, session) get_id = self.userapp_token.get_last_searched_id(user_id) if", "или по кнопке из меню чата' keyboard = VkKeyboard(one_time=True) keyboard.add_button('start', color=VkKeyboardColor.SECONDARY) self.vk_api.messages.send(peer_id=peer_id, message=message,", "if usr_info['relation'] is None: self._ask_relation_msg(u_id) return False self._ask_to_move_msg(u_id) return True def _re_check(self, u_id,", "self._resend(user_id, '/gender 1 или /gender 2') elif text_msg.split()[0] == '/re': txt_c = len(text_msg.split())", "if user['relation'] == 0: user['relation'] = None self.user_app.add_user(vk_id=user['id'], firstname=user['firstname'], lastname=user['lastname'], dob=user['dob'], gender=user['gender'], city=user['city'],", "{text_msg} \") if text_msg not in VKLaunchGroup.COMMANDS and \\ text_msg.split()[0] not in {'/dob',", "usr_name) -> None: message = f'Хаю-Хай 🐍 {usr_name}, для работы с ботом перейдите", "def _search_users(self, u_id, user_token): usr_search = UserSearchList(u_id, session) s_engine = SearchEngine(u_id, user_token) if", "Отправьте \"/re\" и цифру от 1 - 8\\n\\n1 - не женат/не замужем\\n' '2", "_ask_relation_msg(self, peer_id) -> None: message = ('Ваше семейное положение? 
Отправьте \"/re\" и цифру", "random_id=get_random_id()) return r_usr.vk_usr_id class VKLaunchGroup(VKGroupManage): def start(self): for event in self.long_poll.listen(): if event.type", "self.oauth_link = oauth_link self.u_vk_api = None def _get_firstname(self, user_id): return self.vk_api.users.get(user_ids=user_id)[0]['first_name'] def _next(self,", "else: self._resend(user_id, '/dob D.M.YYYY') elif text_msg.split()[0] == '/from': txt_c = len(text_msg.split()) == 3", "= oauth_link self.u_vk_api = None def _get_firstname(self, user_id): return self.vk_api.users.get(user_ids=user_id)[0]['first_name'] def _next(self, user_id,", "user_firstname, usr_search, v_usr_cook, s_engine) get_id = self.userapp_token.get_last_searched_id(user_id) if get_id is not None: usr_search.move_user_to_archive(get_id)", "f'Хаю-Хай 🐍 {usr_name}, для работы с ботом перейдите по кнопке снизу \"sign up", "self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _send_msg_signed_in(self, peer_id, firstname) -> None: message = f'{firstname},", "= num[0], num[1], num[2] if d.isdigit() and m.isdigit() and y.isdigit(): if 1 <=", "user_firstname) elif text_msg == 'а давай познакомимся 🐼': self._get_acquaintance(user_token) self._re_check(user_id, user_token) elif text_msg.split()[0]", "2\" -> парень') return False if usr_info['relation'] is None: self._ask_relation_msg(u_id) return False self._ask_to_move_msg(u_id)", "{'/dob', '/from', '/gender', '/re'}: self._unknown_command(user_id, text_msg) else: user_exist = self.userapp_token.check_user(user_id) user_token = self.userapp_token.get_user_token(user_id)", "self.userapp_token.update_last_searched(user_id, None) self._ask_to_move_msg(user_id) # Messaging def _send_msg(self, peer_id, message) -> None: self.vk_api.messages.send(peer_id=peer_id, message=message,", "u_token), daemon=True) VKGroupManage.QUEUE.put(t) while not VKGroupManage.QUEUE.empty(): 
VKGroupManage.QUEUE.get().start() return True return False def _c_dob(self,", "до 80 лет допускается)' '\\nпр. \"/dob 15.7.1990\" ') return False if usr_info['city'] is", "start\" keyboard = VkKeyboard(one_time=False) keyboard.add_button('start', color=VkKeyboardColor.SECONDARY) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _ask_relation_msg(self, peer_id)", "if relation.isdigit() and int(relation) in range(1, 9): self.user_app.update(u_id, int(relation), 'relation') return True self._send_msg(u_id,", "usr_search_list, usr_cook, search_engine) self._send_msg(u_id, f'{name}, успешно нашли подходящего пользователей 🐼') self.vk_api.messages.send(peer_id=u_id, message=f'[id{r_usr.vk_usr_id}|{r_usr.firstname} {r_usr.lastname}]',", "= None self.user_app.add_user(vk_id=user['id'], firstname=user['firstname'], lastname=user['lastname'], dob=user['dob'], gender=user['gender'], city=user['city'], relation=user['relation']) return True def _check_new_usr_info(self,", "None or len(user['dob'].split('.')) != 3 or not 1942 <= int(user['dob'].split('.')[2]) < 2014: user['dob']", "= usr_search_list.select_random_row() attach = usr_cook.get_user_photos(r_usr.vk_usr_id) if len(attach) != 3: usr_search_list.move_user_to_archive(r_usr.vk_usr_id) return self._generate_user(u_id, name,", "usr_cook.get_user_photos(r_usr.vk_usr_id) if len(attach) != 3: usr_search_list.move_user_to_archive(r_usr.vk_usr_id) return self._generate_user(u_id, name, usr_search_list, usr_cook, search_engine) self._send_msg(u_id,", "VKinderUser, VkUserCook class VKGroupManage: QUEUE = Queue() COMMANDS = {'start', 'начать', 'search', 'свайп", "нашли подходящего пользователей 🐼') self.vk_api.messages.send(peer_id=u_id, message=f'[id{r_usr.vk_usr_id}|{r_usr.firstname} {r_usr.lastname}]', attachment=attach, random_id=get_random_id()) return r_usr.vk_usr_id class VKLaunchGroup(VKGroupManage):", "= ('Ваше семейное положение? 
Отправьте \"/re\" и цифру от 1 - 8\\n\\n1 -", "(от 9 до 80 лет допускается)' '\\nпр. \"/dob 15.7.1990\" ') return False if", "if get_id is not None: usr_search.move_user_to_archive(get_id) self.userapp_token.update_last_searched(user_id, random_id) else: self.userapp_token.update_last_searched(user_id, random_id) self._ask_to_move_msg(user_id) def", "'city': usr.city, 'gender': usr.gender, 'relation': usr.relation} if usr_info['dob'] is None: self._send_msg(u_id, 'Напишите дату", "random_id = self._generate_user(user_id, user_firstname, usr_search, v_usr_cook, s_engine) get_id = self.userapp_token.get_last_searched_id(user_id) if get_id is", "🐼\\n' \\ f'(нужно познакомится с пандой, чтобы перейти к поиску)' keyboard = VkKeyboard(one_time=True)", "дату рождения в формате: -> /dob D.M.YYYY (от 9 до 80 лет допускается)'", "step == 1 and user_exist: if start: self._send_msg(user_id, 'приветствую, нажмите на кнопки снизу", "user_token, user_firstname) elif text_msg == 'доб. в чс': self._move_to_black(user_id) self._send_msg(user_id, 'пользователь добавлен в", "меню чата' keyboard = VkKeyboard(one_time=True) keyboard.add_button('start', color=VkKeyboardColor.SECONDARY) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _unknown_command(self,", "from src.vk_chat_bot.vk.vkontakte import SearchEngine, VKinderUser, VkUserCook class VKGroupManage: QUEUE = Queue() COMMANDS =", "v_usr_cook = VkUserCook(user_token) s_engine = SearchEngine(user_id, user_token) random_id = self._generate_user(user_id, user_firstname, usr_search, v_usr_cook,", "random_id=get_random_id()) def _send_msg_sign_up(self, peer_id, usr_name) -> None: message = f'Хаю-Хай 🐍 {usr_name}, для", "keyboard.add_button('свайп вправо', color=VkKeyboardColor.SECONDARY) keyboard.add_line() keyboard.add_button('доб. в избранное', color=VkKeyboardColor.POSITIVE) keyboard.add_line() keyboard.add_button('доб. 
в чс', color=VkKeyboardColor.NEGATIVE)", "return False def _c_gender(self, u_id, gender) -> bool: if gender.isdigit() and int(gender) in", "usr.city, 'gender': usr.gender, 'relation': usr.relation} if usr_info['dob'] is None: self._send_msg(u_id, 'Напишите дату рождения", "self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _send_bye(self, peer_id, usr_name) -> None: message = f'{usr_name},", "not in VKLaunchGroup.COMMANDS and \\ text_msg.split()[0] not in {'/dob', '/from', '/gender', '/re'}: self._unknown_command(user_id,", "self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None, random_id=get_random_id()) def _ask_to_move_msg(self, peer_id) -> None: keyboard = VkKeyboard(one_time=True) keyboard.add_button('свайп", "'\\nпр. \"/from 🇺🇦 Киев\" или \"/from 🇷🇺 Москва\" или \"/from BY Минск\"') return", "next_ = text_msg in {'next', 'свайп вправо'} if start and user_exist is False:", "is not None: usr_search.move_user_to_black(get_id) self.userapp_token.update_last_searched(user_id, None) self._ask_to_move_msg(user_id) # Messaging def _send_msg(self, peer_id, message)", "keyboard.add_button('start', color=VkKeyboardColor.POSITIVE) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _send_msg_signed_in(self, peer_id, firstname) -> None: message", "self._get_acquaintance(user_token) self._re_check(user_id, user_token) elif text_msg.split()[0] == '/dob': txt_c = len(text_msg.split()) == 2 if", "6\"') self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None, random_id=get_random_id()) def _ask_to_move_msg(self, peer_id) -> None: keyboard = VkKeyboard(one_time=True)", "relation) -> bool: if relation.isdigit() and int(relation) in range(1, 9): self.user_app.update(u_id, int(relation), 'relation')", "{city} ☺️') return True self._send_msg(u_id, 'Страна/город указан неверено') return False def 
_c_gender(self, u_id,", "- usr.dob.year, gender=usr.gender, city=usr.city, relation=usr.relation) def _generate_user(self, u_id, name, usr_search_list, usr_cook, search_engine): if", "user_firstname) elif step == 0 and user_exist: if start: self._send_msg_signed_in(user_id, user_firstname) elif text_msg", "link=self.oauth_link) keyboard.add_button('start', color=VkKeyboardColor.POSITIVE) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _send_msg_signed_in(self, peer_id, firstname) -> None:", "-> None: keyboard = VkKeyboard(one_time=True) keyboard.add_button('свайп вправо', color=VkKeyboardColor.SECONDARY) keyboard.add_line() keyboard.add_button('доб. в избранное', color=VkKeyboardColor.POSITIVE)", "и напишите город' '\\nпр. \"/from 🇺🇦 Киев\" или \"/from 🇷🇺 Москва\" или \"/from", "None: self._send_msg(u_id, f'{name}, подходящих пользователей не найдено... вернитесь чуть позже😓') return None r_usr", "user_exist is False: self._send_msg_sign_up(user_id, user_firstname) elif step == 0 and user_exist: if start:", "напишите город' '\\nпр. 
\"/from 🇺🇦 Киев\" или \"/from 🇷🇺 Москва\" или \"/from BY", "len(user['dob'].split('.')) != 3 or not 1942 <= int(user['dob'].split('.')[2]) < 2014: user['dob'] = None", "\"/gender 1\" -> девушка, \"/gender 2\" -> парень') return False if usr_info['relation'] is", "user_firstname) elif text_msg == 'ну...давай позже 😔': self._send_bye(user_id, user_firstname) elif text_msg == 'а", "usr_search = UserSearchList(user_id, session) get_id = self.userapp_token.get_last_searched_id(user_id) if user_id is not None: usr_search.move_user_to_black(get_id)", "None if user['gender'] == 0: user['gender'] = None if user['relation'] == 0: user['relation']", "от 1 - 8\\n\\n1 - не женат/не замужем\\n' '2 - есть друг/есть подруга\\n3", "self.vk_api.messages.send(peer_id=u_id, message=f'[id{r_usr.vk_usr_id}|{r_usr.firstname} {r_usr.lastname}]', attachment=attach, random_id=get_random_id()) return r_usr.vk_usr_id class VKLaunchGroup(VKGroupManage): def start(self): for event", "group_id=group_id) self.vk_api = self.vk.get_api() self.userapp_token = UserAppToken(session) self.user_app = UserApp(session) self.oauth_link = oauth_link", "давай познакомимся? 
🐼\\n' \\ f'(нужно познакомится с пандой, чтобы перейти к поиску)' keyboard", "return r_usr.vk_usr_id class VKLaunchGroup(VKGroupManage): def start(self): for event in self.long_poll.listen(): if event.type ==", "from vk_api.utils import get_random_id from src.vk_chat_bot.db.database import UserAppToken, UserSearchList, UserApp, session from src.vk_chat_bot.vk.vkontakte", "relation=user['relation']) return True def _check_new_usr_info(self, u_id): usr = self.user_app.get_user(u_id) usr_info = {'dob': usr.dob,", "\"/from 🇷🇺 Москва\" или \"/from BY Минск\"') return False if usr_info['gender'] is None:", "Москва\" или \"/from BY Минск\"') return False if usr_info['gender'] is None: self._send_msg(u_id, 'Ваш", "if txt_c: self._c_city(user_id, text_msg.split()[1], text_msg.split()[2]) self._re_check(user_id, user_token) elif text_msg.split()[0] == '/gender': txt_c =", "keyboard=None, random_id=get_random_id()) def _ask_to_move_msg(self, peer_id) -> None: keyboard = VkKeyboard(one_time=True) keyboard.add_button('свайп вправо', color=VkKeyboardColor.SECONDARY)", "text_msg.split()[1]) self._re_check(user_id, user_token) else: self._resend(user_id, '/dob D.M.YYYY') elif text_msg.split()[0] == '/from': txt_c =", "if usr_search_list.check_users_existence() is None: self._send_msg(u_id, f'{name}, подходящих пользователей не найдено... 
вернитесь чуть позже😓')", "f'{country} {city} ☺️') return True self._send_msg(u_id, 'Страна/город указан неверено') return False def _c_gender(self,", "self.u_vk_api.database.getCountries(code=country_flag) country_id = country['items'][0]['id'] if country_id != 0: ci = self.u_vk_api.database.getCities(country_id=country_id, q=city, count=1)['items']", "VkKeyboard(one_time=True) keyboard.add_button('start', color=VkKeyboardColor.SECONDARY) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _unknown_command(self, peer_id, txt_msg) -> None:", "= None if user['relation'] == 0: user['relation'] = None self.user_app.add_user(vk_id=user['id'], firstname=user['firstname'], lastname=user['lastname'], dob=user['dob'],", "random_id) self._ask_to_move_msg(user_id) def _move_to_fav(self, user_id) -> None: usr_search = UserSearchList(user_id, session) get_id =", "== 2 if txt_c: self._c_dob(user_id, text_msg.split()[1]) self._re_check(user_id, user_token) else: self._resend(user_id, '/dob D.M.YYYY') elif", "in self.long_poll.listen(): if event.type == VkBotEventType.MESSAGE_NEW: user_id = event.obj['message']['peer_id'] user_firstname = self._get_firstname(user_id) text_msg", "_get_firstname(self, user_id): return self.vk_api.users.get(user_ids=user_id)[0]['first_name'] def _next(self, user_id, user_token, user_firstname) -> None: usr_search =", "= event.obj['message']['peer_id'] user_firstname = self._get_firstname(user_id) text_msg = event.obj['message']['text'].strip().lower() print(f\"New msg from {user_id}, text:", "нажмите на кнопки снизу 🐼') self._ask_to_move_msg(user_id) elif next_: self._next(user_id, user_token, user_firstname) self._ask_to_move_msg(user_id) elif", "keyboard.add_button('доб. 
в чс', color=VkKeyboardColor.NEGATIVE) self.vk_api.messages.send(peer_id=peer_id, message='✔️', keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _get_acquaintance(self, u_token): user =", "color=VkKeyboardColor.POSITIVE) keyboard.add_line() keyboard.add_button('доб. в чс', color=VkKeyboardColor.NEGATIVE) self.vk_api.messages.send(peer_id=peer_id, message='✔️', keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _get_acquaintance(self, u_token):", "v_usr_cook, s_engine) get_id = self.userapp_token.get_last_searched_id(user_id) if get_id is not None: usr_search.move_user_to_archive(get_id) self.userapp_token.update_last_searched(user_id, random_id)", "📝\" и ' \\ f'выдайте необходимые права 🐼 после нажмите на зеленую кнопку", "черный список 🌚\\n' 'идет следующий поиск...') self._next(user_id, user_token, user_firstname) if __name__ == '__main__':", "None: message = f'Хаю-Хай 🐍 {usr_name}, для работы с ботом перейдите по кнопке", "в избранное', 'доб. в чс', 'ну...давай позже 😔', 'а давай познакомимся 🐼'} def", "is None: self._send_msg(u_id, f'{name}, подходящих пользователей не найдено... вернитесь чуть позже😓') return None", "мы все еще не знакомы... давай познакомимся? 🐼\\n' \\ f'(нужно познакомится с пандой,", "\"sign up 📝\" и ' \\ f'выдайте необходимые права 🐼 после нажмите на", "== '/re': txt_c = len(text_msg.split()) == 2 if txt_c: self._c_relation(user_id, text_msg.split()[1]) self._re_check(user_id, user_token)", "self._ask_to_move_msg(user_id) elif next_: self._next(user_id, user_token, user_firstname) self._ask_to_move_msg(user_id) elif text_msg == 'доб. в избранное':", "познакомимся? 
🐼\\n' \\ f'(нужно познакомится с пандой, чтобы перейти к поиску)' keyboard =", "0 and user_exist: if start: self._send_msg_signed_in(user_id, user_firstname) elif text_msg == 'ну...давай позже 😔':", "None: usr_search.move_user_to_favourite(get_id) self.userapp_token.update_last_searched(user_id, None) self._ask_to_move_msg(user_id) def _move_to_black(self, user_id) -> None: usr_search = UserSearchList(user_id,", "= self.u_vk_api.database.getCountries(code=country_flag) country_id = country['items'][0]['id'] if country_id != 0: ci = self.u_vk_api.database.getCities(country_id=country_id, q=city,", "== 2 if txt_c: self._c_gender(user_id, text_msg.split()[1]) self._re_check(user_id, user_token) else: self._resend(user_id, '/gender 1 или", "message = f'Неверный формат, правильный формат: {value}' self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=None, random_id=get_random_id()) def _send_msg_sign_up(self,", "user_exist: if start: self._send_msg(user_id, 'приветствую, нажмите на кнопки снизу 🐼') self._ask_to_move_msg(user_id) elif next_:", "usr_search_list.move_user_to_archive(r_usr.vk_usr_id) return self._generate_user(u_id, name, usr_search_list, usr_cook, search_engine) self._send_msg(u_id, f'{name}, успешно нашли подходящего пользователей", "gender=usr.gender, city=usr.city, relation=usr.relation) def _generate_user(self, u_id, name, usr_search_list, usr_cook, search_engine): if usr_search_list.check_users_existence() is", "if start: self._send_msg(user_id, 'приветствую, нажмите на кнопки снизу 🐼') self._ask_to_move_msg(user_id) elif next_: self._next(user_id,", "not None: usr_search.move_user_to_archive(get_id) self.userapp_token.update_last_searched(user_id, random_id) else: self.userapp_token.update_last_searched(user_id, random_id) self._ask_to_move_msg(user_id) def _move_to_fav(self, user_id) ->", "session) get_id = self.userapp_token.get_last_searched_id(user_id) if user_id is not None: usr_search.move_user_to_black(get_id) 
self.userapp_token.update_last_searched(user_id, None) self._ask_to_move_msg(user_id)", "') return False if usr_info['city'] is None: self._send_msg(u_id, 'Откуда вы? в формате =>", "-> bool: if relation.isdigit() and int(relation) in range(1, 9): self.user_app.update(u_id, int(relation), 'relation') return", "познакомится с пандой, чтобы перейти к поиску)' keyboard = VkKeyboard(one_time=True) keyboard.add_button('а давай познакомимся", "text_msg.split()[0] not in {'/dob', '/from', '/gender', '/re'}: self._unknown_command(user_id, text_msg) else: user_exist = self.userapp_token.check_user(user_id)", "usr_search = UserSearchList(user_id, session) get_id = self.userapp_token.get_last_searched_id(user_id) if user_id is not None: usr_search.move_user_to_favourite(get_id)", "перейдите по кнопке снизу \"sign up 📝\" и ' \\ f'выдайте необходимые права", "self._send_msg_sign_up(user_id, user_firstname) elif step == 0 and user_exist: if start: self._send_msg_signed_in(user_id, user_firstname) elif", "self._send_msg(u_id, 'Неверный пол') return False def _c_relation(self, u_id, relation) -> bool: if relation.isdigit()", "_send_msg_sign_up(self, peer_id, usr_name) -> None: message = f'Хаю-Хай 🐍 {usr_name}, для работы с", "return False self._ask_to_move_msg(u_id) return True def _re_check(self, u_id, u_token): if self._check_new_usr_info(u_id): self.userapp_token.update_step(u_id, 1)", "usr_search_list.select_random_row() attach = usr_cook.get_user_photos(r_usr.vk_usr_id) if len(attach) != 3: usr_search_list.move_user_to_archive(r_usr.vk_usr_id) return self._generate_user(u_id, name, usr_search_list,", "return False def _search_users(self, u_id, user_token): usr_search = UserSearchList(u_id, session) s_engine = SearchEngine(u_id,", "False: self._send_msg_sign_up(user_id, user_firstname) elif step == 0 and user_exist: if start: self._send_msg_signed_in(user_id, user_firstname)", "указан неверено') return False def _c_gender(self, u_id, gender) -> bool: if gender.isdigit() and", 
"elif text_msg.split()[0] == '/re': txt_c = len(text_msg.split()) == 2 if txt_c: self._c_relation(user_id, text_msg.split()[1])", "user_firstname) -> None: usr_search = UserSearchList(user_id, session) v_usr_cook = VkUserCook(user_token) s_engine = SearchEngine(user_id,", "count=1)['items'] self.user_app.update(u_id, ci[0]['id'], 'city') self._send_msg(u_id, f'{country} {city} ☺️') return True self._send_msg(u_id, 'Страна/город указан", "in range(1, 9): self.user_app.update(u_id, int(relation), 'relation') return True self._send_msg(u_id, 'Семейное положение указан неверно')", "с пандой, чтобы перейти к поиску)' keyboard = VkKeyboard(one_time=True) keyboard.add_button('а давай познакомимся 🐼',", "☺️') return True self._send_msg(u_id, 'Дата указана неверено') return False def _c_city(self, u_id, country,", "len(attach) != 3: usr_search_list.move_user_to_archive(r_usr.vk_usr_id) return self._generate_user(u_id, name, usr_search_list, usr_cook, search_engine) self._send_msg(u_id, f'{name}, успешно", "f'(нужно познакомится с пандой, чтобы перейти к поиску)' keyboard = VkKeyboard(one_time=True) keyboard.add_button('а давай", "def _send_msg_sign_up(self, peer_id, usr_name) -> None: message = f'Хаю-Хай 🐍 {usr_name}, для работы", "def _send_msg_signed_in(self, peer_id, firstname) -> None: message = f'{firstname}, мы все еще не", "есть друг/есть подруга\\n3 - помолвлен/помолвлена\\n4 - женат/замужем\\n5 - всё сложно\\n' '6 - в", "== 0: user['gender'] = None if user['relation'] == 0: user['relation'] = None self.user_app.add_user(vk_id=user['id'],", "None: self._send_msg(u_id, 'Ваш пол?\\n пр. 
\"/gender 1\" -> девушка, \"/gender 2\" -> парень')", "keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _get_acquaintance(self, u_token): user = VKinderUser(u_token).get_info() if user['dob'] is None or", "in answer: num = answer.split('.') if len(num) == 3: d, m, y =", "False def _search_users(self, u_id, user_token): usr_search = UserSearchList(u_id, session) s_engine = SearchEngine(u_id, user_token)", "🐼', color=VkKeyboardColor.POSITIVE) keyboard.add_line() keyboard.add_button('ну...давай позже 😔', color=VkKeyboardColor.NEGATIVE) self.vk_api.messages.send(peer_id=peer_id, message=message, keyboard=keyboard.get_keyboard(), random_id=get_random_id()) def _send_bye(self,", "self._resend(user_id, '/dob D.M.YYYY') elif text_msg.split()[0] == '/from': txt_c = len(text_msg.split()) == 3 if", "text_msg.split()[2]) self._re_check(user_id, user_token) elif text_msg.split()[0] == '/gender': txt_c = len(text_msg.split()) == 2 if" ]
[ "j in jsons: name = j[\"name\"] address = j[\"address\"] character = extract_char(j[\"description\"]) latitude", "'未登録' for j in jsons: name = j[\"name\"] address = j[\"address\"] character =", "content = open(\"docs/points.json\").read() contents = content.split(\"\\n\") contents = list(filter(lambda x: x != \"\",", "address=address, traffic=mi, business_hours=mi, holiday=mi, daytrip=mi, price=mi, character=character, indoor=mi, outdoor=mi, parking=mi, website=mi, note=mi, latitude=latitude,", "open(\"docs/points.json\").read() contents = content.split(\"\\n\") contents = list(filter(lambda x: x != \"\", contents)) jsons", "import timezone content = open(\"docs/points.json\").read() contents = content.split(\"\\n\") contents = list(filter(lambda x: x", "django.utils import timezone content = open(\"docs/points.json\").read() contents = content.split(\"\\n\") contents = list(filter(lambda x:", "contents)) def extract_char(c): return c[c.find(\"泉質:\")+3:] mi = '未登録' for j in jsons: name", "j[\"name\"] address = j[\"address\"] character = extract_char(j[\"description\"]) latitude = j[\"location\"][0] longitude = j[\"location\"][1]", "= content.split(\"\\n\") contents = list(filter(lambda x: x != \"\", contents)) jsons = list(map(json.loads,", "= j[\"location\"][0] longitude = j[\"location\"][1] onsen = Onsen(name=name, tel=mi, address=address, traffic=mi, business_hours=mi, holiday=mi,", "\"\", contents)) jsons = list(map(json.loads, contents)) def extract_char(c): return c[c.find(\"泉質:\")+3:] mi = '未登録'", "j[\"address\"] character = extract_char(j[\"description\"]) latitude = j[\"location\"][0] longitude = j[\"location\"][1] onsen = Onsen(name=name,", "list(map(json.loads, contents)) def extract_char(c): return c[c.find(\"泉質:\")+3:] mi = '未登録' for j in jsons:", "return c[c.find(\"泉質:\")+3:] mi = '未登録' for j in jsons: name = j[\"name\"] address", "contents = content.split(\"\\n\") contents = list(filter(lambda x: x != \"\", contents)) jsons =", "timezone content = 
open(\"docs/points.json\").read() contents = content.split(\"\\n\") contents = list(filter(lambda x: x !=", "mi = '未登録' for j in jsons: name = j[\"name\"] address = j[\"address\"]", "latitude = j[\"location\"][0] longitude = j[\"location\"][1] onsen = Onsen(name=name, tel=mi, address=address, traffic=mi, business_hours=mi,", "= j[\"location\"][1] onsen = Onsen(name=name, tel=mi, address=address, traffic=mi, business_hours=mi, holiday=mi, daytrip=mi, price=mi, character=character,", "= list(map(json.loads, contents)) def extract_char(c): return c[c.find(\"泉質:\")+3:] mi = '未登録' for j in", "jsons: name = j[\"name\"] address = j[\"address\"] character = extract_char(j[\"description\"]) latitude = j[\"location\"][0]", "extract_char(c): return c[c.find(\"泉質:\")+3:] mi = '未登録' for j in jsons: name = j[\"name\"]", "c[c.find(\"泉質:\")+3:] mi = '未登録' for j in jsons: name = j[\"name\"] address =", "business_hours=mi, holiday=mi, daytrip=mi, price=mi, character=character, indoor=mi, outdoor=mi, parking=mi, website=mi, note=mi, latitude=latitude, longitude=longitude, publish_date=timezone.now(),", "in jsons: name = j[\"name\"] address = j[\"address\"] character = extract_char(j[\"description\"]) latitude =", "= j[\"address\"] character = extract_char(j[\"description\"]) latitude = j[\"location\"][0] longitude = j[\"location\"][1] onsen =", "from database.models import Onsen import json from django.utils import timezone content = open(\"docs/points.json\").read()", "contents = list(filter(lambda x: x != \"\", contents)) jsons = list(map(json.loads, contents)) def", "content.split(\"\\n\") contents = list(filter(lambda x: x != \"\", contents)) jsons = list(map(json.loads, contents))", "name = j[\"name\"] address = j[\"address\"] character = extract_char(j[\"description\"]) latitude = j[\"location\"][0] longitude", "= Onsen(name=name, tel=mi, address=address, traffic=mi, business_hours=mi, holiday=mi, daytrip=mi, price=mi, character=character, indoor=mi, outdoor=mi, parking=mi,", 
"holiday=mi, daytrip=mi, price=mi, character=character, indoor=mi, outdoor=mi, parking=mi, website=mi, note=mi, latitude=latitude, longitude=longitude, publish_date=timezone.now(), modified_date=timezone.now())", "list(filter(lambda x: x != \"\", contents)) jsons = list(map(json.loads, contents)) def extract_char(c): return", "daytrip=mi, price=mi, character=character, indoor=mi, outdoor=mi, parking=mi, website=mi, note=mi, latitude=latitude, longitude=longitude, publish_date=timezone.now(), modified_date=timezone.now()) onsen.save()", "json from django.utils import timezone content = open(\"docs/points.json\").read() contents = content.split(\"\\n\") contents =", "database.models import Onsen import json from django.utils import timezone content = open(\"docs/points.json\").read() contents", "def extract_char(c): return c[c.find(\"泉質:\")+3:] mi = '未登録' for j in jsons: name =", "extract_char(j[\"description\"]) latitude = j[\"location\"][0] longitude = j[\"location\"][1] onsen = Onsen(name=name, tel=mi, address=address, traffic=mi,", "x: x != \"\", contents)) jsons = list(map(json.loads, contents)) def extract_char(c): return c[c.find(\"泉質:\")+3:]", "= open(\"docs/points.json\").read() contents = content.split(\"\\n\") contents = list(filter(lambda x: x != \"\", contents))", "= '未登録' for j in jsons: name = j[\"name\"] address = j[\"address\"] character", "import json from django.utils import timezone content = open(\"docs/points.json\").read() contents = content.split(\"\\n\") contents", "!= \"\", contents)) jsons = list(map(json.loads, contents)) def extract_char(c): return c[c.find(\"泉質:\")+3:] mi =", "jsons = list(map(json.loads, contents)) def extract_char(c): return c[c.find(\"泉質:\")+3:] mi = '未登録' for j", "= j[\"name\"] address = j[\"address\"] character = extract_char(j[\"description\"]) latitude = j[\"location\"][0] longitude =", "= extract_char(j[\"description\"]) latitude = j[\"location\"][0] longitude = j[\"location\"][1] onsen = Onsen(name=name, tel=mi, 
address=address,", "j[\"location\"][1] onsen = Onsen(name=name, tel=mi, address=address, traffic=mi, business_hours=mi, holiday=mi, daytrip=mi, price=mi, character=character, indoor=mi,", "onsen = Onsen(name=name, tel=mi, address=address, traffic=mi, business_hours=mi, holiday=mi, daytrip=mi, price=mi, character=character, indoor=mi, outdoor=mi,", "tel=mi, address=address, traffic=mi, business_hours=mi, holiday=mi, daytrip=mi, price=mi, character=character, indoor=mi, outdoor=mi, parking=mi, website=mi, note=mi,", "longitude = j[\"location\"][1] onsen = Onsen(name=name, tel=mi, address=address, traffic=mi, business_hours=mi, holiday=mi, daytrip=mi, price=mi,", "traffic=mi, business_hours=mi, holiday=mi, daytrip=mi, price=mi, character=character, indoor=mi, outdoor=mi, parking=mi, website=mi, note=mi, latitude=latitude, longitude=longitude,", "<reponame>kikei/onsen<filename>docs/import.py from database.models import Onsen import json from django.utils import timezone content =", "x != \"\", contents)) jsons = list(map(json.loads, contents)) def extract_char(c): return c[c.find(\"泉質:\")+3:] mi", "address = j[\"address\"] character = extract_char(j[\"description\"]) latitude = j[\"location\"][0] longitude = j[\"location\"][1] onsen", "character = extract_char(j[\"description\"]) latitude = j[\"location\"][0] longitude = j[\"location\"][1] onsen = Onsen(name=name, tel=mi,", "Onsen import json from django.utils import timezone content = open(\"docs/points.json\").read() contents = content.split(\"\\n\")", "= list(filter(lambda x: x != \"\", contents)) jsons = list(map(json.loads, contents)) def extract_char(c):", "Onsen(name=name, tel=mi, address=address, traffic=mi, business_hours=mi, holiday=mi, daytrip=mi, price=mi, character=character, indoor=mi, outdoor=mi, parking=mi, website=mi,", "for j in jsons: name = j[\"name\"] address = j[\"address\"] character = extract_char(j[\"description\"])", "j[\"location\"][0] longitude = j[\"location\"][1] onsen = Onsen(name=name, 
tel=mi, address=address, traffic=mi, business_hours=mi, holiday=mi, daytrip=mi,", "from django.utils import timezone content = open(\"docs/points.json\").read() contents = content.split(\"\\n\") contents = list(filter(lambda", "import Onsen import json from django.utils import timezone content = open(\"docs/points.json\").read() contents =", "contents)) jsons = list(map(json.loads, contents)) def extract_char(c): return c[c.find(\"泉質:\")+3:] mi = '未登録' for" ]
[ "mixer from rest_framework import status from rest_framework.test import APIClient from rest_framework_jwt.settings import api_settings", "User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name()) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response", "import api_settings from django.test import TestCase from userapp.models import User from patientapp.models import", "== 200, 'only admin can edit' # authorized user user_obj = mixer.blend(User,admin=True) payload", "jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code", "mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response =", "rest_framework import status from rest_framework.test import APIClient from rest_framework_jwt.settings import api_settings from django.test", "added' # serializers errors user_obj = User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name(),admin=True) payload = jwt_payload_handler(user_obj) token =", "\\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code == 400, 'only admin can add' # location", "geography_obj = mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(326545)) assert response.status_code == 204, 'content not found'", "client.post('/api/v1/geography', \\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code == 400, 'only admin can add' #", "client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'state':fake.name(),\\ 
'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code == 400, 'only admin can add' #", "token) geography_obj = mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 204, 'data delete'", "first_name=fake.name(),last_name=fake.name()) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response =", "response = client.get('/api/v1/geography') assert response.status_code == 200, 'admin can access' def test_post_geography(self): patient_obj", "from treatmentapp.models import Treatment from addressapp.models import Geography, ActivityArea pytestmark = pytest.mark.django_db jwt_payload_handler", "assert response.status_code == 400, 'only admin can access' def test_post_geography(self): patient_obj = mixer.blend(Patient)", "patient_obj = mixer.blend(Patient) encounter_obj = mixer.blend(Encounter,patient=patient_obj) client = APIClient() # un authorized access", "location already added user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT", "access by admin user_obj = mixer.blend(User) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT", "\\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code == 400, 'street_address should contain only string' #", "APIClient from rest_framework_jwt.settings import api_settings from django.test import TestCase from userapp.models import User", "= mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 400, 'only admin can delete'", "user response = client.get('/api/v1/geography') assert response.status_code == 401, 'list geography' 
user_obj = mixer.blend(User)", "jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj=Geography.objects.create(city=\"ktm\",state=\"ktm\",street_address=\"ktm\",country=\"nepal\") response = client.put('/api/v1/geography/'+str(geography_obj.id), \\", "\\ {'city':\"ktm\",'state':\"ktm\",\\ 'country':\"Nepal\",'street_address':\"ktm\"},format='json') assert response.status_code == 400, 'location already exists' # authorized user", "TestGeographyUpdate(TestCase): def test_listupdate_geography(self): client = APIClient() # un authorized access by user response", "== 401, 'list geography' user_obj = mixer.blend(User) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload)", "user geography_obj = mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 401, 'Permission not", "un authorized access by user geography_obj = mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code", "'only admin can add' # authorized user with admin user_obj = User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name(),admin=True)", "jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) Geography.objects.create(city=\"ktm\",state=\"ktm\",street_address=\"ktm\",country=\"nepal\") response = client.post('/api/v1/geography', \\ {'city':\"ktm\",'state':\"ktm\",\\ 'country':\"Nepal\",'street_address':\"ktm\"},format='json') assert", "= client.post('/api/v1/geography', \\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code == 400, 'only admin can add'", "access by user patient_obj = mixer.blend(Patient) response = client.post('/api/v1/geography') assert response.status_code == 401,", "client.credentials(HTTP_AUTHORIZATION='JWT ' + 
token) geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':fake.name()},format='json')", "= mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj", "# unauthorized user user_obj = mixer.blend(User) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT", "= jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response = client.post('/api/v1/geography', \\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert", "can add' # authorized user with admin user_obj = User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name(),admin=True) payload =", "client.post('/api/v1/geography') assert response.status_code == 401, 'Un authorized access denied.' 
# authorized user user_obj", "can access' def test_post_geography(self): patient_obj = mixer.blend(Patient) encounter_obj = mixer.blend(Encounter,patient=patient_obj) client = APIClient()", "# serializers errors user_obj = User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name(),admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload)", "APIClient() # un authorized access by user patient_obj = mixer.blend(Patient) response = client.put('/api/v1/geography')", "patient_obj = mixer.blend(Patient) response = client.put('/api/v1/geography') assert response.status_code == 401, 'Un authorized access", "204, 'content not found' #un authorized access by admin user_obj = mixer.blend(User) payload", "= mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(326545)) assert response.status_code == 204, 'content not found' #un", "'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code == 400, 'street_address should contain only string' class TestGeographyUpdate(TestCase): def", "geography_obj = mixer.blend(Geography) response = client.get('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 400, 'only admin can", "= APIClient() # un authorized access by user response = client.get('/api/v1/geography') assert response.status_code", "User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name(),admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response", "User from patientapp.models import Patient from encounterapp.models import Encounter, History, Refer from treatmentapp.models", "= client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code == 400, 'only admin can add'", "api_settings.JWT_ENCODE_HANDLER 
fake = Faker() import re class TestGeography(TestCase): def test_list_geography(self): client = APIClient()", "response.status_code == 401, 'list geography' #authorized access by admin user_obj = mixer.blend(User,admin=True) payload", "History, Refer from treatmentapp.models import Treatment from addressapp.models import Geography, ActivityArea pytestmark =", "payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography)", "un authorized access by user patient_obj = mixer.blend(Patient) response = client.put('/api/v1/geography') assert response.status_code", "import APIClient from rest_framework_jwt.settings import api_settings from django.test import TestCase from userapp.models import", "mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code == 400, 'only admin", "400, 'serializers errors' # location already added user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj)", "== 400, 'only admin can access' def test_post_geography(self): patient_obj = mixer.blend(Patient) encounter_obj =", "= jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography) response = client.get('/api/v1/geography/'+str(23656544654)) assert", "response = client.post('/api/v1/geography', \\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':\"ktm\"},format='json') assert response.status_code == 200, 'geography added' #", "jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response = client.post('/api/v1/geography', \\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':\"ktm\"},format='json') assert response.status_code", 
"response.status_code == 400, 'only admin can access' def test_post_geography(self): patient_obj = mixer.blend(Patient) encounter_obj", "' + token) response = client.get('/api/v1/geography') assert response.status_code == 200, 'user can access'", "from patientapp.models import Patient from encounterapp.models import Encounter, History, Refer from treatmentapp.models import", "response.status_code == 200, 'user can access' user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token", "jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(1165465456), \\ {'city':fake.name(),'street_address':fake.name(),\\", "mixer.blend(Geography) response = client.get('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 400, 'only admin can access' def", "jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response = client.post('/api/v1/geography', \\ {'city':fake.name(),'state':'',\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code", "Patient from encounterapp.models import Encounter, History, Refer from treatmentapp.models import Treatment from addressapp.models", "test_post_geography(self): patient_obj = mixer.blend(Patient) encounter_obj = mixer.blend(Encounter,patient=patient_obj) client = APIClient() # un authorized", "token) Geography.objects.create(city=\"ktm\",state=\"ktm\",street_address=\"ktm\",country=\"nepal\") response = client.post('/api/v1/geography', \\ {'city':\"ktm\",'state':\"ktm\",\\ 'country':\"Nepal\",'street_address':\"ktm\"},format='json') assert response.status_code == 400, 'location", "response = client.put('/api/v1/geography') assert response.status_code == 401, 'Un authorized access denied.' 
# unauthorized", "jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':fake.name(),\\", "== 400, 'only admin can add' # authorized user with admin user_obj =", "== 400, 'location already exists' # authorized user user_obj = mixer.blend(User,admin=True) payload =", "denied.' # authorized user user_obj = User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name()) payload = jwt_payload_handler(user_obj) token =", "# un authorized access by user geography_obj = mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(geography_obj.id)) assert", "token) geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(1165465456), \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':'Nepal'},format='json') assert response.status_code ==", "jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER fake = Faker() import re class TestGeography(TestCase):", "' + token) geography_obj = mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 400,", "400, 'only admin can add' # location already added user_obj = mixer.blend(User,admin=True) payload", "mixer.blend(Patient) encounter_obj = mixer.blend(Encounter,patient=patient_obj) client = APIClient() # un authorized access by user", "token) geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':''},format='json') assert response.status_code ==", "== 401, 'list geography' #authorized access by admin user_obj = mixer.blend(User,admin=True) payload =", "response = client.get('/api/v1/geography/'+str(23656544654)) 
assert response.status_code == 204, 'content not found' #authorized access by", "mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(326545)) assert response.status_code == 204, 'content not found' #un authorized", "# authorized user user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT", "= mixer.blend(Patient) response = client.put('/api/v1/geography') assert response.status_code == 401, 'Un authorized access denied.'", "= pytest.mark.django_db jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER fake = Faker() import re", "= Faker() import re class TestGeography(TestCase): def test_list_geography(self): client = APIClient() # un", "rest_framework_jwt.settings import api_settings from django.test import TestCase from userapp.models import User from patientapp.models", "authorized user user_obj = User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name()) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT", "+ token) geography_obj=Geography.objects.create(city=\"ktm\",state=\"ktm\",street_address=\"ktm\",country=\"nepal\") response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':\"ktm\",'state':\"ktm\",\\ 'country':\"Nepal\",'street_address':\"ktm\"},format='json') assert response.status_code == 400,", "pytest from faker import Faker from mixer.backend.django import mixer from rest_framework import status", "treatmentapp.models import Treatment from addressapp.models import Geography, ActivityArea pytestmark = pytest.mark.django_db jwt_payload_handler =", "response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':\"ktm\",\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code == 200, 'only admin 
can", "= mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(1165465456), \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':'Nepal'},format='json') assert response.status_code == 204, 'content", "== 204, 'data delete' #un authorized access by admin user_obj = mixer.blend(User,admin=True) payload", "delete' #un authorized access by admin user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token", "= client.post('/api/v1/geography', \\ {'city':\"ktm\",'state':\"ktm\",\\ 'country':\"Nepal\",'street_address':\"ktm\"},format='json') assert response.status_code == 400, 'location already exists' #", "access' def test_post_geography(self): patient_obj = mixer.blend(Patient) encounter_obj = mixer.blend(Encounter,patient=patient_obj) client = APIClient() #", "#authorized access by admin user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload)", "'content not found' #un authorized access by admin user_obj = mixer.blend(User) payload =", "import Patient from encounterapp.models import Encounter, History, Refer from treatmentapp.models import Treatment from", "from django.contrib.auth.models import Permission import pytest from faker import Faker from mixer.backend.django import", "== 400, 'serializers errors' # authorized user user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj)", "import TestCase from userapp.models import User from patientapp.models import Patient from encounterapp.models import", "add' # location already added user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token =", "user response = client.get('/api/v1/geography') assert response.status_code == 401, 'list geography' #authorized access by", "# authorized user user_obj = User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name()) payload = jwt_payload_handler(user_obj) token = 
jwt_encode_handler(payload)", "client.post('/api/v1/geography', \\ {'city':\"ktm\",'state':\"ktm\",\\ 'country':\"Nepal\",'street_address':\"ktm\"},format='json') assert response.status_code == 400, 'location already exists' # authorized", "= client.get('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 400, 'only admin can access' def test_post_geography(self): patient_obj", "response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':''},format='json') assert response.status_code == 400, 'serializers errors' #", "== 204, 'content not found' #authorized access by admin user_obj = mixer.blend(User) payload", "== 400, 'serializers errors' # location already added user_obj = mixer.blend(User,admin=True) payload =", "import mixer from rest_framework import status from rest_framework.test import APIClient from rest_framework_jwt.settings import", "= mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 204, 'data delete' #un authorized", "{'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code == 400, 'street_address should contain only string' # authorized", "APIClient() # un authorized access by user geography_obj = mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(geography_obj.id))", "response.status_code == 400, 'serializers errors' # authorized user user_obj = mixer.blend(User,admin=True) payload =", "string' class TestGeographyUpdate(TestCase): def test_listupdate_geography(self): client = APIClient() # un authorized access by", "= jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography) response", "token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + 
token) response = client.post('/api/v1/geography', \\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':\"ktm\"},format='json')", "can add' # location already added user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token", "admin user_obj = mixer.blend(User) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' +", "payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj=Geography.objects.create(city=\"ktm\",state=\"ktm\",street_address=\"ktm\",country=\"nepal\") response =", "token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography) response = client.get('/api/v1/geography/'+str(geography_obj.id))", "== 200, 'admin can access' def test_post_geography(self): patient_obj = mixer.blend(Patient) encounter_obj = mixer.blend(Encounter,patient=patient_obj)", "= mixer.blend(Patient) response = client.post('/api/v1/geography') assert response.status_code == 401, 'Un authorized access denied.'", "token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(1165465456),", "400, 'serializers errors' # authorized user user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token", "client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response = client.post('/api/v1/geography', \\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code ==", "response = client.delete('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 204, 'data delete' #un authorized access by", "' + token) response = client.post('/api/v1/geography', \\ 
{'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code == 400,", "'geography added' # serializers errors user_obj = User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name(),admin=True) payload = jwt_payload_handler(user_obj) token", "pytest.mark.django_db jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER fake = Faker() import re class", "401, 'list geography' #authorized access by admin user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj)", "re class TestGeography(TestCase): def test_list_geography(self): client = APIClient() # un authorized access by", "response.status_code == 200, 'only admin can edit' # authorized user user_obj = mixer.blend(User,admin=True)", "+ token) geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(1165465456), \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':'Nepal'},format='json') assert response.status_code", "not found' #un authorized access by admin user_obj = mixer.blend(User) payload = jwt_payload_handler(user_obj)", "response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code == 400, 'only admin can", "response.status_code == 200, 'admin can access' def test_post_geography(self): patient_obj = mixer.blend(Patient) encounter_obj =", "APIClient() # un authorized access by user patient_obj = mixer.blend(Patient) response = client.post('/api/v1/geography')", "mixer.blend(Patient) response = client.post('/api/v1/geography') assert response.status_code == 401, 'Un authorized access denied.' 
#", "Refer from treatmentapp.models import Treatment from addressapp.models import Geography, ActivityArea pytestmark = pytest.mark.django_db", "assert response.status_code == 400, 'serializers errors' # authorized user user_obj = mixer.blend(User,admin=True) payload", "= client.delete('/api/v1/geography/'+str(326545)) assert response.status_code == 204, 'content not found' #un authorized access by", "= client.get('/api/v1/geography/'+str(23656544654)) assert response.status_code == 204, 'content not found' #authorized access by admin", "response = client.get('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 400, 'only admin can access' def test_post_geography(self):", "serializers errors user_obj = User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name(),admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT", "client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code == 400, 'street_address should contain only string'", "client = APIClient() # un authorized access by user response = client.get('/api/v1/geography') assert", "# un authorized access by user patient_obj = mixer.blend(Patient) response = client.put('/api/v1/geography') assert", "client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':\"Nepal\"},format='json')", "user_obj = User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name(),admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' +", "= mixer.blend(User,admin=True) payload = 
jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response", "import User from patientapp.models import Patient from encounterapp.models import Encounter, History, Refer from", "= api_settings.JWT_PAYLOAD_HANDLER jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER fake = Faker() import re class TestGeography(TestCase): def", "def test_listupdate_geography(self): client = APIClient() # un authorized access by user response =", "client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response = client.get('/api/v1/geography') assert response.status_code == 200, 'admin can", "django.contrib.auth.models import Permission import pytest from faker import Faker from mixer.backend.django import mixer", "response = client.post('/api/v1/geography') assert response.status_code == 401, 'Un authorized access denied.' # authorized", "token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) Geography.objects.create(city=\"ktm\",state=\"ktm\",street_address=\"ktm\",country=\"nepal\") response = client.post('/api/v1/geography', \\ {'city':\"ktm\",'state':\"ktm\",\\", "access by user geography_obj = mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 401,", "'only admin can edit' # authorized user user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj)", "user user_obj = mixer.blend(User) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' +", "contain only string' class TestGeographyUpdate(TestCase): def test_listupdate_geography(self): client = APIClient() # un authorized", "authorized access by admin user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload)", "response = client.get('/api/v1/geography') assert response.status_code == 401, 'list geography' 
user_obj = mixer.blend(User) payload", "token) geography_obj=Geography.objects.create(city=\"ktm\",state=\"ktm\",street_address=\"ktm\",country=\"nepal\") response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':\"ktm\",'state':\"ktm\",\\ 'country':\"Nepal\",'street_address':\"ktm\"},format='json') assert response.status_code == 400, 'location", "= jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) Geography.objects.create(city=\"ktm\",state=\"ktm\",street_address=\"ktm\",country=\"nepal\") response = client.post('/api/v1/geography',", "' + token) response = client.post('/api/v1/geography', \\ {'city':fake.name(),'state':'',\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code == 400,", "response = client.put('/api/v1/geography/'+str(1165465456), \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':'Nepal'},format='json') assert response.status_code == 204, 'content not found'", "admin user_obj = User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name(),admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT '", "client = APIClient() # un authorized access by user geography_obj = mixer.blend(Geography) response", "= jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response = client.get('/api/v1/geography') assert", "only string' # authorized user user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token =", "+ token) response = client.post('/api/v1/geography', \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code == 400, 'street_address", "import Faker from mixer.backend.django import mixer from rest_framework import 
status from rest_framework.test import", "api_settings.JWT_PAYLOAD_HANDLER jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER fake = Faker() import re class TestGeography(TestCase): def test_list_geography(self):", "client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response = client.post('/api/v1/geography', \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code ==", "found' #authorized access by admin user_obj = mixer.blend(User) payload = jwt_payload_handler(user_obj) token =", "assert response.status_code == 400, 'location already exists' # authorized user user_obj = mixer.blend(User,admin=True)", "{'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code == 400, 'street_address should contain only string' class TestGeographyUpdate(TestCase):", "admin can access' def test_post_geography(self): patient_obj = mixer.blend(Patient) encounter_obj = mixer.blend(Encounter,patient=patient_obj) client =", "= jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) Geography.objects.create(city=\"ktm\",state=\"ktm\",street_address=\"ktm\",country=\"nepal\") response = client.post('/api/v1/geography', \\ {'city':\"ktm\",'state':\"ktm\",\\ 'country':\"Nepal\",'street_address':\"ktm\"},format='json')", "api_settings from django.test import TestCase from userapp.models import User from patientapp.models import Patient", "= User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name()) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token)", "can access' user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT '", "authorized access denied.' 
# authorized user user_obj = User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name()) payload = jwt_payload_handler(user_obj)", "= jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response = client.post('/api/v1/geography', \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert", "#un authorized access by admin user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token =", "' + token) Geography.objects.create(city=\"ktm\",state=\"ktm\",street_address=\"ktm\",country=\"nepal\") response = client.post('/api/v1/geography', \\ {'city':\"ktm\",'state':\"ktm\",\\ 'country':\"Nepal\",'street_address':\"ktm\"},format='json') assert response.status_code ==", "jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography) response = client.get('/api/v1/geography/'+str(23656544654)) assert response.status_code", "= client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':\"ktm\",'state':\"ktm\",\\ 'country':\"Nepal\",'street_address':\"ktm\"},format='json') assert response.status_code == 400, 'location already exists' #", "mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(1165465456), \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':'Nepal'},format='json') assert response.status_code == 204, 'content not", "authorized access denied.' 
# unauthorized user user_obj = mixer.blend(User) payload = jwt_payload_handler(user_obj) token", "token) geography_obj = mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 400, 'only admin", "\\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code == 400, 'only admin can add' # authorized", "' + token) geography_obj = mixer.blend(Geography) response = client.get('/api/v1/geography/'+str(23656544654)) assert response.status_code == 204,", "401, 'list geography' user_obj = mixer.blend(User) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT", "= client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':''},format='json') assert response.status_code == 400, 'serializers errors' # authorized", "response.status_code == 400, 'location already exists' # authorized user user_obj = mixer.blend(User,admin=True) payload", "+ token) geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':\"ktm\",\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code", "= mixer.blend(Encounter,patient=patient_obj) client = APIClient() # un authorized access by user patient_obj =", "+ token) response = client.post('/api/v1/geography', \\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':\"ktm\"},format='json') assert response.status_code == 200, 'geography", "'location already exists' # authorized user user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token", "response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':\"ktm\",'state':\"ktm\",\\ 
'country':\"Nepal\",'street_address':\"ktm\"},format='json') assert response.status_code == 400, 'location already exists'", "assert response.status_code == 200, 'only admin can edit' # authorized user user_obj =", "assert response.status_code == 204, 'content not found' #authorized access by admin user_obj =", "Faker() import re class TestGeography(TestCase): def test_list_geography(self): client = APIClient() # un authorized", "mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj =", "+ token) response = client.get('/api/v1/geography') assert response.status_code == 200, 'user can access' user_obj", "-*- from django.contrib.auth.models import Permission import pytest from faker import Faker from mixer.backend.django", "= APIClient() # un authorized access by user geography_obj = mixer.blend(Geography) response =", "un authorized access by user patient_obj = mixer.blend(Patient) response = client.post('/api/v1/geography') assert response.status_code", "= client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':\"ktm\",\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code == 200, 'only admin can edit'", "authorized access by admin user_obj = mixer.blend(User) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload)", "Encounter, History, Refer from treatmentapp.models import Treatment from addressapp.models import Geography, ActivityArea pytestmark", "'content not found' def test_delete_geography(self): client = APIClient() # un authorized access by", "' + token) response = client.post('/api/v1/geography', \\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':\"ktm\"},format='json') assert response.status_code == 200,", "'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code == 400, 'street_address 
should contain only string' # authorized user", "400, 'location already exists' # authorized user user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj)", "response = client.post('/api/v1/geography', \\ {'city':\"ktm\",'state':\"ktm\",\\ 'country':\"Nepal\",'street_address':\"ktm\"},format='json') assert response.status_code == 400, 'location already exists'", "rest_framework.test import APIClient from rest_framework_jwt.settings import api_settings from django.test import TestCase from userapp.models", "' + token) geography_obj = mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(326545)) assert response.status_code == 204,", "client.get('/api/v1/geography') assert response.status_code == 200, 'admin can access' def test_post_geography(self): patient_obj = mixer.blend(Patient)", "+ token) geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code", "+ token) geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':''},format='json') assert response.status_code", "token) geography_obj = mixer.blend(Geography) response = client.get('/api/v1/geography/'+str(23656544654)) assert response.status_code == 204, 'content not", "= jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj=Geography.objects.create(city=\"ktm\",state=\"ktm\",street_address=\"ktm\",country=\"nepal\") response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':\"ktm\",'state':\"ktm\",\\ 'country':\"Nepal\",'street_address':\"ktm\"},format='json')", "+ token) geography_obj = mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(geography_obj.id)) assert 
response.status_code == 400, 'only", "400, 'street_address should contain only string' # authorized user user_obj = mixer.blend(User,admin=True) payload", "'only admin can add' # location already added user_obj = mixer.blend(User,admin=True) payload =", "TestCase from userapp.models import User from patientapp.models import Patient from encounterapp.models import Encounter,", "mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':''},format='json') assert response.status_code == 400, 'serializers errors'", "authorized access by user response = client.get('/api/v1/geography') assert response.status_code == 401, 'list geography'", "# un authorized access by user response = client.get('/api/v1/geography') assert response.status_code == 401,", "= client.post('/api/v1/geography') assert response.status_code == 401, 'Un authorized access denied.' # authorized user", "mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 204, 'data delete' #un authorized access", "already added user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT '", "jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER fake = Faker() import re class TestGeography(TestCase): def test_list_geography(self): client", "geography_obj = mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 400, 'only admin can", "jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response = client.post('/api/v1/geography', \\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code", "assert response.status_code == 204, 'data delete' #un authorized access by 
admin user_obj =", "'Un authorized access denied.' # authorized user user_obj = User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name()) payload =", "'list geography' #authorized access by admin user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token", "response.status_code == 401, 'Un authorized access denied.' # authorized user user_obj = User.objects.create(email=fake.email(),\\", "= mixer.blend(User) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response", "geography_obj = mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 401, 'Permission not define'", "jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response = client.get('/api/v1/geography') assert response.status_code", "<reponame>AbhiyantrikTechnology/DentalHub-Backend # -*- coding:utf-8 -*- from django.contrib.auth.models import Permission import pytest from faker", "' + token) response = client.get('/api/v1/geography') assert response.status_code == 200, 'admin can access'", "response = client.get('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 200, 'admin can access' #authorized access by", "'data delete' #un authorized access by admin user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj)", "client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(1165465456), \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':'Nepal'},format='json')", "= client.get('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 200, 'admin can access' #authorized access by admin", "user_obj = mixer.blend(User) payload = jwt_payload_handler(user_obj) token = 
jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token)", "\\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':'Nepal'},format='json') assert response.status_code == 204, 'content not found' def test_delete_geography(self): client", "faker import Faker from mixer.backend.django import mixer from rest_framework import status from rest_framework.test", "= client.put('/api/v1/geography') assert response.status_code == 401, 'Un authorized access denied.' # unauthorized user", "client.get('/api/v1/geography') assert response.status_code == 200, 'user can access' user_obj = mixer.blend(User,admin=True) payload =", "token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography) response = client.get('/api/v1/geography/'+str(23656544654))", "= client.put('/api/v1/geography/'+str(1165465456), \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':'Nepal'},format='json') assert response.status_code == 204, 'content not found' def", "jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response = client.post('/api/v1/geography', \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code", "#un authorized access by admin user_obj = mixer.blend(User) payload = jwt_payload_handler(user_obj) token =", "== 401, 'Permission not define' #un authorized access by admin user_obj = mixer.blend(User,admin=True)", "jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'state':fake.name(),\\", "mixer.blend(User) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response =", "assert 
response.status_code == 401, 'Un authorized access denied.' # unauthorized user user_obj =", "= mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 401, 'Permission not define' #un", "200, 'user can access' user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload)", "401, 'Un authorized access denied.' # authorized user user_obj = User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name()) payload", "client.post('/api/v1/geography', \\ {'city':fake.name(),'state':'',\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code == 400, 'serializers errors' # location already", "\\ {'city':fake.name(),'state':'',\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code == 400, 'serializers errors' # location already added", "client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':''},format='json') assert response.status_code == 400, 'serializers errors' # authorized user", "token) geography_obj = mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(326545)) assert response.status_code == 204, 'content not", "already exists' # authorized user user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token =", "can edit' # authorized user user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token =", "client.put('/api/v1/geography/'+str(1165465456), \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':'Nepal'},format='json') assert response.status_code == 204, 'content not found' def test_delete_geography(self):", "authorized user user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) 
client.credentials(HTTP_AUTHORIZATION='JWT '", "200, 'geography added' # serializers errors user_obj = User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name(),admin=True) payload = jwt_payload_handler(user_obj)", "response = client.delete('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 401, 'Permission not define' #un authorized access", "= jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography) response = client.get('/api/v1/geography/'+str(geography_obj.id)) assert", "= mixer.blend(Geography) response = client.get('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 400, 'only admin can access'", "token) response = client.post('/api/v1/geography', \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code == 400, 'street_address should", "by user patient_obj = mixer.blend(Patient) response = client.post('/api/v1/geography') assert response.status_code == 401, 'Un", "response.status_code == 401, 'Permission not define' #un authorized access by admin user_obj =", "client.credentials(HTTP_AUTHORIZATION='JWT ' + token) Geography.objects.create(city=\"ktm\",state=\"ktm\",street_address=\"ktm\",country=\"nepal\") response = client.post('/api/v1/geography', \\ {'city':\"ktm\",'state':\"ktm\",\\ 'country':\"Nepal\",'street_address':\"ktm\"},format='json') assert response.status_code", "not found' #authorized access by admin user_obj = mixer.blend(User) payload = jwt_payload_handler(user_obj) token", "import Encounter, History, Refer from treatmentapp.models import Treatment from addressapp.models import Geography, ActivityArea", "define' #un authorized access by admin user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token", "#authorized access by admin user_obj = mixer.blend(User) payload = jwt_payload_handler(user_obj) 
token = jwt_encode_handler(payload)", "== 400, 'street_address should contain only string' class TestGeographyUpdate(TestCase): def test_listupdate_geography(self): client =", "token) response = client.post('/api/v1/geography', \\ {'city':fake.name(),'state':'',\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code == 400, 'serializers errors'", "geography' #authorized access by admin user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token =", "= mixer.blend(User) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj", "' + token) geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':\"ktm\",\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert", "admin can add' # authorized user with admin user_obj = User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name(),admin=True) payload", "jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response = client.post('/api/v1/geography', \\ {'city':fake.name(),'street_address':fake.name(),\\", "authorized access by user patient_obj = mixer.blend(Patient) response = client.post('/api/v1/geography') assert response.status_code ==", "+ token) geography_obj = mixer.blend(Geography) response = client.get('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 200, 'admin", "assert response.status_code == 400, 'only admin can add' # location already added user_obj", "= jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(geography_obj.id)) assert", "400, 'street_address should contain only string' class TestGeographyUpdate(TestCase): def 
test_listupdate_geography(self): client = APIClient()", "client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(326545)) assert response.status_code ==", "geography_obj = mixer.blend(Geography) response = client.get('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 200, 'admin can access'", "Geography, ActivityArea pytestmark = pytest.mark.django_db jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER fake =", "jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj=Geography.objects.create(city=\"ktm\",state=\"ktm\",street_address=\"ktm\",country=\"nepal\") response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':\"ktm\",'state':\"ktm\",\\ 'country':\"Nepal\",'street_address':\"ktm\"},format='json') assert", "'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code == 400, 'only admin can add' # location already added", "user user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' +", "response.status_code == 401, 'Un authorized access denied.' 
# unauthorized user user_obj = mixer.blend(User)", "response.status_code == 400, 'only admin can add' # authorized user with admin user_obj", "client.post('/api/v1/geography', \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code == 400, 'street_address should contain only string'", "= mixer.blend(Patient) encounter_obj = mixer.blend(Encounter,patient=patient_obj) client = APIClient() # un authorized access by", "assert response.status_code == 400, 'street_address should contain only string' class TestGeographyUpdate(TestCase): def test_listupdate_geography(self):", "access by user response = client.get('/api/v1/geography') assert response.status_code == 401, 'list geography' #authorized", "= client.post('/api/v1/geography', \\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':\"ktm\"},format='json') assert response.status_code == 200, 'geography added' # serializers", "only string' class TestGeographyUpdate(TestCase): def test_listupdate_geography(self): client = APIClient() # un authorized access", "not define' #un authorized access by admin user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj)", "test_delete_geography(self): client = APIClient() # un authorized access by user geography_obj = mixer.blend(Geography)", "response.status_code == 400, 'serializers errors' # location already added user_obj = mixer.blend(User,admin=True) payload", "token) response = client.get('/api/v1/geography') assert response.status_code == 200, 'admin can access' def test_post_geography(self):", "exists' # authorized user user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload)", "payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response = client.post('/api/v1/geography',", "response.status_code == 200, 
'admin can access' #authorized access by admin user_obj = mixer.blend(User,admin=True)", "edit' # authorized user user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload)", "geography_obj=Geography.objects.create(city=\"ktm\",state=\"ktm\",street_address=\"ktm\",country=\"nepal\") response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':\"ktm\",'state':\"ktm\",\\ 'country':\"Nepal\",'street_address':\"ktm\"},format='json') assert response.status_code == 400, 'location already", "client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography) response = client.get('/api/v1/geography/'+str(23656544654)) assert response.status_code ==", "by user patient_obj = mixer.blend(Patient) response = client.put('/api/v1/geography') assert response.status_code == 401, 'Un", "# -*- coding:utf-8 -*- from django.contrib.auth.models import Permission import pytest from faker import", "token) geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code ==", "client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':\"ktm\",\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code == 200, 'only admin can edit' #", "authorized access by user geography_obj = mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code ==", "== 400, 'only admin can add' # location already added user_obj = mixer.blend(User,admin=True)", "jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography) response = client.get('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code", "patient_obj = mixer.blend(Patient) response 
= client.post('/api/v1/geography') assert response.status_code == 401, 'Un authorized access", "client.get('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 200, 'admin can access' #authorized access by admin user_obj", "mixer.backend.django import mixer from rest_framework import status from rest_framework.test import APIClient from rest_framework_jwt.settings", "authorized user with admin user_obj = User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name(),admin=True) payload = jwt_payload_handler(user_obj) token =", "client.put('/api/v1/geography') assert response.status_code == 401, 'Un authorized access denied.' # unauthorized user user_obj", "= client.get('/api/v1/geography') assert response.status_code == 401, 'list geography' #authorized access by admin user_obj", "'admin can access' #authorized access by admin user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj)", "denied.' # unauthorized user user_obj = mixer.blend(User) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload)", "import Geography, ActivityArea pytestmark = pytest.mark.django_db jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER fake", "def test_post_geography(self): patient_obj = mixer.blend(Patient) encounter_obj = mixer.blend(Encounter,patient=patient_obj) client = APIClient() # un", "client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response = client.post('/api/v1/geography', \\ {'city':fake.name(),'state':'',\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code ==", "'street_address should contain only string' # authorized user user_obj = mixer.blend(User,admin=True) payload =", "'state':fake.name(),'country':'Nepal'},format='json') assert response.status_code == 204, 'content not found' def test_delete_geography(self): client = APIClient()", "400, 'only admin can access' def 
test_post_geography(self): patient_obj = mixer.blend(Patient) encounter_obj = mixer.blend(Encounter,patient=patient_obj)", "response = client.delete('/api/v1/geography/'+str(326545)) assert response.status_code == 204, 'content not found' #un authorized access", "= User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name(),admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token)", "assert response.status_code == 401, 'Un authorized access denied.' # authorized user user_obj =", "{'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code == 400, 'only admin can add' # location already", "assert response.status_code == 200, 'user can access' user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj)", "from rest_framework.test import APIClient from rest_framework_jwt.settings import api_settings from django.test import TestCase from", "by admin user_obj = mixer.blend(User) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT '", "= client.get('/api/v1/geography') assert response.status_code == 200, 'user can access' user_obj = mixer.blend(User,admin=True) payload", "= api_settings.JWT_ENCODE_HANDLER fake = Faker() import re class TestGeography(TestCase): def test_list_geography(self): client =", "client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography) response = client.get('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code ==", "== 401, 'Un authorized access denied.' 
# unauthorized user user_obj = mixer.blend(User) payload", "token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj=Geography.objects.create(city=\"ktm\",state=\"ktm\",street_address=\"ktm\",country=\"nepal\") response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':\"ktm\",'state':\"ktm\",\\", "= mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':''},format='json') assert response.status_code == 400, 'serializers", "access by user response = client.get('/api/v1/geography') assert response.status_code == 401, 'list geography' user_obj", "user patient_obj = mixer.blend(Patient) response = client.post('/api/v1/geography') assert response.status_code == 401, 'Un authorized", "'user can access' user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT", "mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':\"ktm\",\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code == 200, 'only admin", "+ token) geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code", "'list geography' user_obj = mixer.blend(User) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT '", "+ token) geography_obj = mixer.blend(Geography) response = client.get('/api/v1/geography/'+str(23656544654)) assert response.status_code == 204, 'content", "Treatment from addressapp.models import Geography, ActivityArea pytestmark = pytest.mark.django_db 
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER jwt_encode_handler", "jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) Geography.objects.create(city=\"ktm\",state=\"ktm\",street_address=\"ktm\",country=\"nepal\") response = client.post('/api/v1/geography', \\", "client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':\"ktm\",\\ 'state':fake.name(),'country':\"Nepal\"},format='json')", "Faker from mixer.backend.django import mixer from rest_framework import status from rest_framework.test import APIClient", "== 401, 'Un authorized access denied.' # authorized user user_obj = User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name())", "geography_obj = mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 204, 'data delete' #un", "jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(326545)) assert response.status_code", "errors' # location already added user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token =", "jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response = client.get('/api/v1/geography') assert response.status_code == 200, 'user", "'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code == 400, 'only admin can add' # authorized user with", "contain only string' # authorized user user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token", "== 200, 'admin can access' #authorized access by admin user_obj = mixer.blend(User,admin=True) payload", "mixer.blend(Geography) response = 
client.get('/api/v1/geography/'+str(23656544654)) assert response.status_code == 204, 'content not found' #authorized access", "\\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':''},format='json') assert response.status_code == 400, 'serializers errors' # authorized user user_obj", "client = APIClient() # un authorized access by user patient_obj = mixer.blend(Patient) response", "\\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':\"ktm\"},format='json') assert response.status_code == 200, 'geography added' # serializers errors user_obj", "response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code == 400, 'street_address should contain", "+ token) Geography.objects.create(city=\"ktm\",state=\"ktm\",street_address=\"ktm\",country=\"nepal\") response = client.post('/api/v1/geography', \\ {'city':\"ktm\",'state':\"ktm\",\\ 'country':\"Nepal\",'street_address':\"ktm\"},format='json') assert response.status_code == 400,", "mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code == 400, 'street_address should", "access' #authorized access by admin user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token =", "client.get('/api/v1/geography/'+str(23656544654)) assert response.status_code == 204, 'content not found' #authorized access by admin user_obj", "'admin can access' def test_post_geography(self): patient_obj = mixer.blend(Patient) encounter_obj = mixer.blend(Encounter,patient=patient_obj) client =", "response.status_code == 200, 'geography added' # serializers errors user_obj = User.objects.create(email=fake.email(),\\ 
first_name=fake.name(),last_name=fake.name(),admin=True) payload", "= mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) Geography.objects.create(city=\"ktm\",state=\"ktm\",street_address=\"ktm\",country=\"nepal\")", "should contain only string' class TestGeographyUpdate(TestCase): def test_listupdate_geography(self): client = APIClient() # un", "from faker import Faker from mixer.backend.django import mixer from rest_framework import status from", "token) geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code ==", "204, 'data delete' #un authorized access by admin user_obj = mixer.blend(User,admin=True) payload =", "geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(1165465456), \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':'Nepal'},format='json') assert response.status_code == 204,", "response.status_code == 400, 'only admin can add' # location already added user_obj =", "{'city':\"ktm\",'state':\"ktm\",\\ 'country':\"Nepal\",'street_address':\"ktm\"},format='json') assert response.status_code == 400, 'location already exists' # authorized user user_obj", "class TestGeography(TestCase): def test_list_geography(self): client = APIClient() # un authorized access by user", "{'city':fake.name(),'state':'',\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code == 400, 'serializers errors' # location already added user_obj", "assert response.status_code == 200, 'admin can access' def test_post_geography(self): patient_obj = mixer.blend(Patient) encounter_obj", "'Permission not define' #un authorized access by admin user_obj = mixer.blend(User,admin=True) payload 
=", "= jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(1165465456), \\", "APIClient() # un authorized access by user response = client.get('/api/v1/geography') assert response.status_code ==", "token) geography_obj = mixer.blend(Geography) response = client.get('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 400, 'only admin", "jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response = client.post('/api/v1/geography', \\ {'city':fake.name(),'state':'',\\", "204, 'content not found' #authorized access by admin user_obj = mixer.blend(User) payload =", "TestGeography(TestCase): def test_list_geography(self): client = APIClient() # un authorized access by user response", "authorized access by user patient_obj = mixer.blend(Patient) response = client.put('/api/v1/geography') assert response.status_code ==", "response.status_code == 400, 'street_address should contain only string' # authorized user user_obj =", "by user response = client.get('/api/v1/geography') assert response.status_code == 401, 'list geography' user_obj =", "200, 'admin can access' def test_post_geography(self): patient_obj = mixer.blend(Patient) encounter_obj = mixer.blend(Encounter,patient=patient_obj) client", "response.status_code == 204, 'data delete' #un authorized access by admin user_obj = mixer.blend(User,admin=True)", "ActivityArea pytestmark = pytest.mark.django_db jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER fake = Faker()", "token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(geography_obj.id))", "client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response = 
client.get('/api/v1/geography') assert response.status_code == 200, 'user can", "response = client.get('/api/v1/geography') assert response.status_code == 401, 'list geography' #authorized access by admin", "token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response = client.post('/api/v1/geography', \\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':fake.name()},format='json')", "' + token) geography_obj = mixer.blend(Geography) response = client.get('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 400,", "= jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response = client.get('/api/v1/geography') assert response.status_code == 200,", "204, 'content not found' def test_delete_geography(self): client = APIClient() # un authorized access", "from encounterapp.models import Encounter, History, Refer from treatmentapp.models import Treatment from addressapp.models import", "jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':\"ktm\",\\", "import pytest from faker import Faker from mixer.backend.django import mixer from rest_framework import", "response.status_code == 204, 'content not found' #un authorized access by admin user_obj =", "found' def test_delete_geography(self): client = APIClient() # un authorized access by user geography_obj", "user_obj = User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name()) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' +", "assert response.status_code == 200, 'geography added' # serializers errors user_obj = User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name(),admin=True)", "+ token) 
geography_obj = mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 204, 'data", "by user geography_obj = mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 401, 'Permission", "# un authorized access by user patient_obj = mixer.blend(Patient) response = client.post('/api/v1/geography') assert", "jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response = client.post('/api/v1/geography', \\ {'city':fake.name(),'state':fake.name(),\\", "patientapp.models import Patient from encounterapp.models import Encounter, History, Refer from treatmentapp.models import Treatment", "geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code == 400,", "pytestmark = pytest.mark.django_db jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER fake = Faker() import", "import Treatment from addressapp.models import Geography, ActivityArea pytestmark = pytest.mark.django_db jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER", "Geography.objects.create(city=\"ktm\",state=\"ktm\",street_address=\"ktm\",country=\"nepal\") response = client.post('/api/v1/geography', \\ {'city':\"ktm\",'state':\"ktm\",\\ 'country':\"Nepal\",'street_address':\"ktm\"},format='json') assert response.status_code == 400, 'location already", "== 204, 'content not found' #un authorized access by admin user_obj = mixer.blend(User)", "admin can edit' # authorized user user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token", "assert response.status_code == 200, 'admin can access' #authorized access by admin user_obj =", "user patient_obj 
= mixer.blend(Patient) response = client.put('/api/v1/geography') assert response.status_code == 401, 'Un authorized", "admin user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' +", "mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj=Geography.objects.create(city=\"ktm\",state=\"ktm\",street_address=\"ktm\",country=\"nepal\") response", "client.delete('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 204, 'data delete' #un authorized access by admin user_obj", "response.status_code == 401, 'list geography' user_obj = mixer.blend(User) payload = jwt_payload_handler(user_obj) token =", "+ token) geography_obj = mixer.blend(Geography) response = client.get('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 400, 'only", "== 204, 'content not found' def test_delete_geography(self): client = APIClient() # un authorized", "payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) Geography.objects.create(city=\"ktm\",state=\"ktm\",street_address=\"ktm\",country=\"nepal\") response =", "= client.delete('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 204, 'data delete' #un authorized access by admin", "token) geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':\"ktm\",\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code ==", "geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':''},format='json') assert response.status_code == 400,", 
"mixer.blend(Patient) response = client.put('/api/v1/geography') assert response.status_code == 401, 'Un authorized access denied.' #", "token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response = client.get('/api/v1/geography') assert response.status_code ==", "user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token)", "= jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response = client.post('/api/v1/geography', \\ {'city':fake.name(),'state':'',\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert", "= mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj=Geography.objects.create(city=\"ktm\",state=\"ktm\",street_address=\"ktm\",country=\"nepal\")", "should contain only string' # authorized user user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj)", "from rest_framework import status from rest_framework.test import APIClient from rest_framework_jwt.settings import api_settings from", "= jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response = client.post('/api/v1/geography', \\", "401, 'Permission not define' #un authorized access by admin user_obj = mixer.blend(User,admin=True) payload", "user user_obj = User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name()) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT '", "response = client.post('/api/v1/geography', \\ {'city':fake.name(),'state':'',\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code == 400, 'serializers errors' #", "assert 
response.status_code == 204, 'content not found' def test_delete_geography(self): client = APIClient() #", "'street_address should contain only string' class TestGeographyUpdate(TestCase): def test_listupdate_geography(self): client = APIClient() #", "response.status_code == 204, 'content not found' #authorized access by admin user_obj = mixer.blend(User)", "= jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response = client.post('/api/v1/geography', \\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':\"ktm\"},format='json') assert", "errors' # authorized user user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload)", "== 200, 'geography added' # serializers errors user_obj = User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name(),admin=True) payload =", "token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response = client.post('/api/v1/geography', \\ {'city':fake.name(),'state':'',\\ 'country':fake.name(),'street_address':fake.name()},format='json')", "added user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' +", "' + token) response = client.post('/api/v1/geography', \\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code == 400,", "'serializers errors' # authorized user user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token =", "addressapp.models import Geography, ActivityArea pytestmark = pytest.mark.django_db jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER", "client.delete('/api/v1/geography/'+str(326545)) assert response.status_code == 204, 'content not found' #un authorized access by 
admin", "status from rest_framework.test import APIClient from rest_framework_jwt.settings import api_settings from django.test import TestCase", "geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':\"ktm\",\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code == 200,", "not found' def test_delete_geography(self): client = APIClient() # un authorized access by user", "' + token) geography_obj = mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 204,", "mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 401, 'Permission not define' #un authorized", "{'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':\"ktm\"},format='json') assert response.status_code == 200, 'geography added' # serializers errors user_obj =", "'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code == 400, 'serializers errors' # location already added user_obj =", "-*- coding:utf-8 -*- from django.contrib.auth.models import Permission import pytest from faker import Faker", "'only admin can access' def test_post_geography(self): patient_obj = mixer.blend(Patient) encounter_obj = mixer.blend(Encounter,patient=patient_obj) client", "with admin user_obj = User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name(),admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT", "token) response = client.post('/api/v1/geography', \\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code == 400, 'only admin", "assert response.status_code == 401, 'list geography' user_obj = mixer.blend(User) 
payload = jwt_payload_handler(user_obj) token", "# location already added user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload)", "200, 'admin can access' #authorized access by admin user_obj = mixer.blend(User,admin=True) payload =", "= client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code == 400, 'street_address should contain only", "access by user patient_obj = mixer.blend(Patient) response = client.put('/api/v1/geography') assert response.status_code == 401,", "client.post('/api/v1/geography', \\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':\"ktm\"},format='json') assert response.status_code == 200, 'geography added' # serializers errors", "by admin user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT '", "mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) Geography.objects.create(city=\"ktm\",state=\"ktm\",street_address=\"ktm\",country=\"nepal\") response", "# authorized user with admin user_obj = User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name(),admin=True) payload = jwt_payload_handler(user_obj) token", "access by admin user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT", "client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':\"ktm\",'state':\"ktm\",\\ 'country':\"Nepal\",'street_address':\"ktm\"},format='json') assert response.status_code == 400, 'location already exists' # authorized", "payload = jwt_payload_handler(user_obj) token = 
jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response = client.get('/api/v1/geography')", "' + token) geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert", "token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(326545))", "jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography) response =", "' + token) geography_obj = mixer.blend(Geography) response = client.get('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 200,", "admin can add' # location already added user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj)", "= mixer.blend(Geography) response = client.get('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 200, 'admin can access' #authorized", "response = client.post('/api/v1/geography', \\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code == 400, 'only admin can", "import status from rest_framework.test import APIClient from rest_framework_jwt.settings import api_settings from django.test import", "401, 'Un authorized access denied.' 
# unauthorized user user_obj = mixer.blend(User) payload =", "assert response.status_code == 400, 'street_address should contain only string' # authorized user user_obj", "string' # authorized user user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload)", "= client.post('/api/v1/geography', \\ {'city':fake.name(),'state':'',\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code == 400, 'serializers errors' # location", "django.test import TestCase from userapp.models import User from patientapp.models import Patient from encounterapp.models", "assert response.status_code == 400, 'serializers errors' # location already added user_obj = mixer.blend(User,admin=True)", "add' # authorized user with admin user_obj = User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name(),admin=True) payload = jwt_payload_handler(user_obj)", "token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response = client.post('/api/v1/geography', \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':\"Nepal\"},format='json')", "{'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':'Nepal'},format='json') assert response.status_code == 204, 'content not found' def test_delete_geography(self): client =", "client.delete('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 401, 'Permission not define' #un authorized access by admin", "+ token) response = client.post('/api/v1/geography', \\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code == 400, 'only", "assert response.status_code == 400, 'only admin can add' # authorized user with admin", "token) geography_obj = mixer.blend(Geography) response = client.get('/api/v1/geography/'+str(geography_obj.id)) assert 
response.status_code == 200, 'admin can", "found' #un authorized access by admin user_obj = mixer.blend(User) payload = jwt_payload_handler(user_obj) token", "'country':fake.name(),'street_address':\"ktm\"},format='json') assert response.status_code == 200, 'geography added' # serializers errors user_obj = User.objects.create(email=fake.email(),\\", "access' user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' +", "test_listupdate_geography(self): client = APIClient() # un authorized access by user response = client.get('/api/v1/geography')", "client.get('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 400, 'only admin can access' def test_post_geography(self): patient_obj =", "{'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':''},format='json') assert response.status_code == 400, 'serializers errors' # authorized user user_obj =", "= APIClient() # un authorized access by user patient_obj = mixer.blend(Patient) response =", "{'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code == 400, 'only admin can add' # authorized user", "response.status_code == 204, 'content not found' def test_delete_geography(self): client = APIClient() # un", "client.get('/api/v1/geography') assert response.status_code == 401, 'list geography' user_obj = mixer.blend(User) payload = jwt_payload_handler(user_obj)", "user with admin user_obj = User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name(),admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload)", "+ token) geography_obj = mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(326545)) assert response.status_code == 204, 'content", "response = client.post('/api/v1/geography', \\ 
{'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code == 400, 'street_address should contain", "unauthorized user user_obj = mixer.blend(User) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT '", "Permission import pytest from faker import Faker from mixer.backend.django import mixer from rest_framework", "import re class TestGeography(TestCase): def test_list_geography(self): client = APIClient() # un authorized access", "= client.get('/api/v1/geography') assert response.status_code == 200, 'admin can access' def test_post_geography(self): patient_obj =", "' + token) geography_obj=Geography.objects.create(city=\"ktm\",state=\"ktm\",street_address=\"ktm\",country=\"nepal\") response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':\"ktm\",'state':\"ktm\",\\ 'country':\"Nepal\",'street_address':\"ktm\"},format='json') assert response.status_code ==", "mixer.blend(Encounter,patient=patient_obj) client = APIClient() # un authorized access by user patient_obj = mixer.blend(Patient)", "'country':\"Nepal\",'street_address':\"ktm\"},format='json') assert response.status_code == 400, 'location already exists' # authorized user user_obj =", "client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj=Geography.objects.create(city=\"ktm\",state=\"ktm\",street_address=\"ktm\",country=\"nepal\") response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':\"ktm\",'state':\"ktm\",\\ 'country':\"Nepal\",'street_address':\"ktm\"},format='json') assert response.status_code", "400, 'only admin can add' # authorized user with admin user_obj = User.objects.create(email=fake.email(),\\", "'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code == 200, 'only admin can edit' # authorized user user_obj", "\\ {'city':fake.name(),'street_address':fake.name(),\\ 
'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code == 400, 'street_address should contain only string' class", "= jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(326545)) assert", "client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response = client.post('/api/v1/geography', \\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':\"ktm\"},format='json') assert response.status_code ==", "jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response = client.get('/api/v1/geography') assert response.status_code == 200, 'admin", "from django.test import TestCase from userapp.models import User from patientapp.models import Patient from", "{'city':fake.name(),'street_address':\"ktm\",\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code == 200, 'only admin can edit' # authorized user", "can access' #authorized access by admin user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token", "client.get('/api/v1/geography') assert response.status_code == 401, 'list geography' #authorized access by admin user_obj =", "+ token) response = client.post('/api/v1/geography', \\ {'city':fake.name(),'state':'',\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code == 400, 'serializers", "= mixer.blend(Geography) response = client.get('/api/v1/geography/'+str(23656544654)) assert response.status_code == 204, 'content not found' #authorized", "access denied.' 
# unauthorized user user_obj = mixer.blend(User) payload = jwt_payload_handler(user_obj) token =", "+ token) response = client.get('/api/v1/geography') assert response.status_code == 200, 'admin can access' def", "coding:utf-8 -*- from django.contrib.auth.models import Permission import pytest from faker import Faker from", "encounterapp.models import Encounter, History, Refer from treatmentapp.models import Treatment from addressapp.models import Geography,", "= client.post('/api/v1/geography', \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code == 400, 'street_address should contain only", "by user response = client.get('/api/v1/geography') assert response.status_code == 401, 'list geography' #authorized access", "= client.get('/api/v1/geography') assert response.status_code == 401, 'list geography' user_obj = mixer.blend(User) payload =", "fake = Faker() import re class TestGeography(TestCase): def test_list_geography(self): client = APIClient() #", "' + token) geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert", "mixer.blend(User) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj =", "assert response.status_code == 204, 'content not found' #un authorized access by admin user_obj", "= jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj=Geography.objects.create(city=\"ktm\",state=\"ktm\",street_address=\"ktm\",country=\"nepal\") response = client.put('/api/v1/geography/'+str(geography_obj.id),", "first_name=fake.name(),last_name=fake.name(),admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) 
client.credentials(HTTP_AUTHORIZATION='JWT ' + token) response =", "'Un authorized access denied.' # unauthorized user user_obj = mixer.blend(User) payload = jwt_payload_handler(user_obj)", "un authorized access by user response = client.get('/api/v1/geography') assert response.status_code == 401, 'list", "token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id),", "\\ {'city':fake.name(),'street_address':\"ktm\",\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code == 200, 'only admin can edit' # authorized", "geography' user_obj = mixer.blend(User) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' +", "= mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':\"ktm\",\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code == 200, 'only", "= jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\", "response = client.get('/api/v1/geography') assert response.status_code == 200, 'user can access' user_obj = mixer.blend(User,admin=True)", "= mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':fake.name()},format='json') assert response.status_code == 400, 'only", "= client.delete('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 401, 'Permission not define' #un authorized access by", "def test_list_geography(self): client = APIClient() # un authorized access by user response =", "'state':fake.name(),'country':''},format='json') assert 
response.status_code == 400, 'serializers errors' # authorized user user_obj = mixer.blend(User,admin=True)", "== 400, 'street_address should contain only string' # authorized user user_obj = mixer.blend(User,admin=True)", "client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography) response = client.delete('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code ==", "'content not found' #authorized access by admin user_obj = mixer.blend(User) payload = jwt_payload_handler(user_obj)", "errors user_obj = User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name(),admin=True) payload = jwt_payload_handler(user_obj) token = jwt_encode_handler(payload) client.credentials(HTTP_AUTHORIZATION='JWT '", "from addressapp.models import Geography, ActivityArea pytestmark = pytest.mark.django_db jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER jwt_encode_handler =", "client.credentials(HTTP_AUTHORIZATION='JWT ' + token) geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':''},format='json')", "assert response.status_code == 401, 'Permission not define' #un authorized access by admin user_obj", "token) response = client.get('/api/v1/geography') assert response.status_code == 200, 'user can access' user_obj =", "geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code == 400,", "200, 'only admin can edit' # authorized user user_obj = mixer.blend(User,admin=True) payload =", "geography_obj = mixer.blend(Geography) response = client.get('/api/v1/geography/'+str(23656544654)) assert response.status_code == 204, 'content not found'", "from mixer.backend.django import mixer from 
rest_framework import status from rest_framework.test import APIClient from", "'serializers errors' # location already added user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token", "class TestGeographyUpdate(TestCase): def test_listupdate_geography(self): client = APIClient() # un authorized access by user", "def test_delete_geography(self): client = APIClient() # un authorized access by user geography_obj =", "mixer.blend(Geography) response = client.get('/api/v1/geography/'+str(geography_obj.id)) assert response.status_code == 200, 'admin can access' #authorized access", "from userapp.models import User from patientapp.models import Patient from encounterapp.models import Encounter, History,", "assert response.status_code == 401, 'list geography' #authorized access by admin user_obj = mixer.blend(User,admin=True)", "token) response = client.post('/api/v1/geography', \\ {'city':fake.name(),'state':fake.name(),\\ 'country':fake.name(),'street_address':\"ktm\"},format='json') assert response.status_code == 200, 'geography added'", "access denied.' 
# authorized user user_obj = User.objects.create(email=fake.email(),\\ first_name=fake.name(),last_name=fake.name()) payload = jwt_payload_handler(user_obj) token", "from rest_framework_jwt.settings import api_settings from django.test import TestCase from userapp.models import User from", "' + token) geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(1165465456), \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':'Nepal'},format='json') assert", "encounter_obj = mixer.blend(Encounter,patient=patient_obj) client = APIClient() # un authorized access by user patient_obj", "= mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':\"Nepal\"},format='json') assert response.status_code == 400, 'street_address", "import Permission import pytest from faker import Faker from mixer.backend.django import mixer from", "test_list_geography(self): client = APIClient() # un authorized access by user response = client.get('/api/v1/geography')", "response.status_code == 400, 'street_address should contain only string' class TestGeographyUpdate(TestCase): def test_listupdate_geography(self): client", "userapp.models import User from patientapp.models import Patient from encounterapp.models import Encounter, History, Refer", "== 200, 'user can access' user_obj = mixer.blend(User,admin=True) payload = jwt_payload_handler(user_obj) token =", "' + token) geography_obj = mixer.blend(Geography) response = client.put('/api/v1/geography/'+str(geography_obj.id), \\ {'city':fake.name(),'street_address':fake.name(),\\ 'state':fake.name(),'country':''},format='json') assert" ]
[ "flat file dumps', packages = find_packages(), author='<NAME>', author_email='<EMAIL>', url='https://github.com/satta/eupathtables', test_suite='nose.collector', tests_require=['nose >= 1.3'],", "url='https://github.com/satta/eupathtables', test_suite='nose.collector', tests_require=['nose >= 1.3'], scripts=glob.glob('scripts/*'), license='ISC', classifiers=[ 'Development Status :: 4 -", "description='Python interface for reading and converting EuPathDB flat file dumps', packages = find_packages(),", "find_packages try: import multiprocessing except ImportError: pass setup( name='eupathtables', version='0.1', description='Python interface for", "and converting EuPathDB flat file dumps', packages = find_packages(), author='<NAME>', author_email='<EMAIL>', url='https://github.com/satta/eupathtables', test_suite='nose.collector',", "name='eupathtables', version='0.1', description='Python interface for reading and converting EuPathDB flat file dumps', packages", "for reading and converting EuPathDB flat file dumps', packages = find_packages(), author='<NAME>', author_email='<EMAIL>',", "scripts=glob.glob('scripts/*'), license='ISC', classifiers=[ 'Development Status :: 4 - Beta', 'Topic :: Scientific/Engineering ::", "<reponame>satta/eupathtables import glob from setuptools import setup, find_packages try: import multiprocessing except ImportError:", "ImportError: pass setup( name='eupathtables', version='0.1', description='Python interface for reading and converting EuPathDB flat", "tests_require=['nose >= 1.3'], scripts=glob.glob('scripts/*'), license='ISC', classifiers=[ 'Development Status :: 4 - Beta', 'Topic", "license='ISC', classifiers=[ 'Development Status :: 4 - Beta', 'Topic :: Scientific/Engineering :: Bio-Informatics',", "file dumps', packages = find_packages(), author='<NAME>', author_email='<EMAIL>', url='https://github.com/satta/eupathtables', test_suite='nose.collector', tests_require=['nose >= 1.3'], scripts=glob.glob('scripts/*'),", "import setup, 
find_packages try: import multiprocessing except ImportError: pass setup( name='eupathtables', version='0.1', description='Python", "pass setup( name='eupathtables', version='0.1', description='Python interface for reading and converting EuPathDB flat file", "author_email='<EMAIL>', url='https://github.com/satta/eupathtables', test_suite='nose.collector', tests_require=['nose >= 1.3'], scripts=glob.glob('scripts/*'), license='ISC', classifiers=[ 'Development Status :: 4", "version='0.1', description='Python interface for reading and converting EuPathDB flat file dumps', packages =", "packages = find_packages(), author='<NAME>', author_email='<EMAIL>', url='https://github.com/satta/eupathtables', test_suite='nose.collector', tests_require=['nose >= 1.3'], scripts=glob.glob('scripts/*'), license='ISC', classifiers=[", "find_packages(), author='<NAME>', author_email='<EMAIL>', url='https://github.com/satta/eupathtables', test_suite='nose.collector', tests_require=['nose >= 1.3'], scripts=glob.glob('scripts/*'), license='ISC', classifiers=[ 'Development Status", "reading and converting EuPathDB flat file dumps', packages = find_packages(), author='<NAME>', author_email='<EMAIL>', url='https://github.com/satta/eupathtables',", "glob from setuptools import setup, find_packages try: import multiprocessing except ImportError: pass setup(", "dumps', packages = find_packages(), author='<NAME>', author_email='<EMAIL>', url='https://github.com/satta/eupathtables', test_suite='nose.collector', tests_require=['nose >= 1.3'], scripts=glob.glob('scripts/*'), license='ISC',", "author='<NAME>', author_email='<EMAIL>', url='https://github.com/satta/eupathtables', test_suite='nose.collector', tests_require=['nose >= 1.3'], scripts=glob.glob('scripts/*'), license='ISC', classifiers=[ 'Development Status ::", "EuPathDB flat file dumps', packages = find_packages(), author='<NAME>', author_email='<EMAIL>', url='https://github.com/satta/eupathtables', test_suite='nose.collector', 
tests_require=['nose >=", "classifiers=[ 'Development Status :: 4 - Beta', 'Topic :: Scientific/Engineering :: Bio-Informatics', ],", "1.3'], scripts=glob.glob('scripts/*'), license='ISC', classifiers=[ 'Development Status :: 4 - Beta', 'Topic :: Scientific/Engineering", "interface for reading and converting EuPathDB flat file dumps', packages = find_packages(), author='<NAME>',", "setup( name='eupathtables', version='0.1', description='Python interface for reading and converting EuPathDB flat file dumps',", "multiprocessing except ImportError: pass setup( name='eupathtables', version='0.1', description='Python interface for reading and converting", "converting EuPathDB flat file dumps', packages = find_packages(), author='<NAME>', author_email='<EMAIL>', url='https://github.com/satta/eupathtables', test_suite='nose.collector', tests_require=['nose", "from setuptools import setup, find_packages try: import multiprocessing except ImportError: pass setup( name='eupathtables',", "import glob from setuptools import setup, find_packages try: import multiprocessing except ImportError: pass", "setup, find_packages try: import multiprocessing except ImportError: pass setup( name='eupathtables', version='0.1', description='Python interface", "= find_packages(), author='<NAME>', author_email='<EMAIL>', url='https://github.com/satta/eupathtables', test_suite='nose.collector', tests_require=['nose >= 1.3'], scripts=glob.glob('scripts/*'), license='ISC', classifiers=[ 'Development", "try: import multiprocessing except ImportError: pass setup( name='eupathtables', version='0.1', description='Python interface for reading", ">= 1.3'], scripts=glob.glob('scripts/*'), license='ISC', classifiers=[ 'Development Status :: 4 - Beta', 'Topic ::", "'Development Status :: 4 - Beta', 'Topic :: Scientific/Engineering :: Bio-Informatics', ], )", "except ImportError: pass setup( name='eupathtables', version='0.1', description='Python interface for reading and converting EuPathDB", 
"test_suite='nose.collector', tests_require=['nose >= 1.3'], scripts=glob.glob('scripts/*'), license='ISC', classifiers=[ 'Development Status :: 4 - Beta',", "setuptools import setup, find_packages try: import multiprocessing except ImportError: pass setup( name='eupathtables', version='0.1',", "import multiprocessing except ImportError: pass setup( name='eupathtables', version='0.1', description='Python interface for reading and" ]
[ "L(n-1) + L(n-2) from time import perf_counter from math import sqrt from doctest", "µs' % elapsed) def lucas_sequence(n0: int, n1: int, n2: int) -> None: L0,", "\") L0, L1 = L1, L0+L1 def lucas_sequence_timer(n0: int, n1: int, n2: int)", "= False else: print('Stopwatch not running') def reset(self): self.__start_time = self.__elapsed = 0", "n1 if n2 >= 1: print(L0, end=\" \") if n2 >= 2: print(L1,", "11 18 29 47 76 >>> lucas_sequence(1,1,12) 1 1 2 3 5 8", "elapsed = round(elapsed, 2) print('%.2f ms' % elapsed) else: elapsed = time*1000000 elapsed", ">>> lucas_sequence(1,1,12) 1 1 2 3 5 8 13 21 34 55 89", "lucas_sequence_timer(n0: int, n1: int, n2: int) -> None: timer = Stopwatch() timer.start() L0,", "0.001: elapsed = time*1000 elapsed = round(elapsed, 2) print('%.2f ms' % elapsed) else:", "sys class Stopwatch: def __init__(self): self.reset() def start(self): if not self.__running: self.__start_time =", "round(elapsed, 2) print('%.2f µs' % elapsed) def lucas_sequence(n0: int, n1: int, n2: int)", "% elapsed) elif time > 0.001: elapsed = time*1000 elapsed = round(elapsed, 2)", "def start(self): if not self.__running: self.__start_time = perf_counter() self.__running = True else: print('Stopwatch", "timer.start() L0, L1 = n0, n1 if n2 >= 1: print(L0, end=\" \")", "of numbers # such that L(n) = L(n-1) + L(n-2) from time import", "return self.__elapsed else: print('Stopwatch must be stopped') return None def print_time(time:float) -> None:", "def lucas_sequence_last_timer(n0: int, n1: int, n2: int) -> None: timer = Stopwatch() timer.start()", "i in range(0, n2-2): L0, L1 = L1, L0+L1 print(L1, end=\" \") timer.stop()", "lucas_sequence(n0: int, n1: int, n2: int) -> None: L0, L1 = n0, n1", "4261 6569 10830 17399 >>> lucas_sequence(5,-20,6) 5 -20 -15 -35 -50 -85 >>>", "= n0, n1 if n2 >= 1: print(L0, end=\" \") if n2 >=", "else: print('Stopwatch must be stopped') return None def print_time(time:float) -> None: print('\\nElapsed: ',", "n1: int, n2: int) -> None: timer = 
Stopwatch() timer.start() L0, L1 =", "L1, L0+L1 print(L1, end=\" \") def lucas_sequence_last_timer(n0: int, n1: int, n2: int) ->", "print_time(time:float) -> None: print('\\nElapsed: ', end=\"\") if time > 1.0: elapsed = round(time,", "from math import sqrt from doctest import testmod import sys class Stopwatch: def", "1 3 4 7 11 18 29 47 76 >>> lucas_sequence(1,1,12) 1 1", "doctest import testmod import sys class Stopwatch: def __init__(self): self.reset() def start(self): if", "test_lucas_sequence(): \"\"\" >>> lucas_sequence(0,0,6) 0 0 0 0 0 0 >>> lucas_sequence(2,1,10) 2", "<gh_stars>0 # Lucas Sequence L is a sequence of numbers # such that", "= Stopwatch() timer.start() L0, L1 = n0, n1 for i in range(0, n2-2):", "None: L0, L1 = n0, n1 for i in range(0, n2-2): L0, L1", "lucas_sequence_last(2,1,100) 489526700523968661124 \"\"\" pass if __name__ == \"__main__\": if(sys.argv[3:]): lucas_sequence(int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3])) else:", "range(0, n2-2): print(L0+L1, end=\" \") L0, L1 = L1, L0+L1 def lucas_sequence_timer(n0: int,", "running') def reset(self): self.__start_time = self.__elapsed = 0 self.__running = False def elapsed(self):", "None: print('\\nElapsed: ', end=\"\") if time > 1.0: elapsed = round(time, 3) print('%.3f", "L0+L1 timer.stop() print_time(timer.elapsed()) def lucas_sequence_last(n0: int, n1: int, n2: int) -> None: L0,", "= False def elapsed(self): if not self.__running: return self.__elapsed else: print('Stopwatch must be", "testmod import sys class Stopwatch: def __init__(self): self.reset() def start(self): if not self.__running:", "= L1, L0+L1 print(L1, end=\" \") def lucas_sequence_last_timer(n0: int, n1: int, n2: int)", "L1, L0+L1 timer.stop() print_time(timer.elapsed()) def lucas_sequence_last(n0: int, n1: int, n2: int) -> None:", "perf_counter()-self.__start_time self.__running = False else: print('Stopwatch not running') def reset(self): self.__start_time = self.__elapsed", "print('Stopwatch not running') def 
reset(self): self.__start_time = self.__elapsed = 0 self.__running = False", "i in range(0, n2-2): L0, L1 = L1, L0+L1 print(L1, end=\" \") def", "print('%.3f s' % elapsed) elif time > 0.001: elapsed = time*1000 elapsed =", "n2: int) -> None: L0, L1 = n0, n1 if n2 >= 1:", "time > 0.001: elapsed = time*1000 elapsed = round(elapsed, 2) print('%.2f ms' %", "L0+L1 def lucas_sequence_timer(n0: int, n1: int, n2: int) -> None: timer = Stopwatch()", "3 4 7 11 18 29 47 76 >>> lucas_sequence(1,1,12) 1 1 2", "if not self.__running: self.__start_time = perf_counter() self.__running = True else: print('Stopwatch already running')", "in range(0, n2-2): L0, L1 = L1, L0+L1 print(L1, end=\" \") timer.stop() print_time(timer.elapsed())", "range(0, n2-2): L0, L1 = L1, L0+L1 print(L1, end=\" \") def lucas_sequence_last_timer(n0: int,", "= L1, L0+L1 def lucas_sequence_timer(n0: int, n1: int, n2: int) -> None: timer", "47 76 >>> lucas_sequence(1,1,12) 1 1 2 3 5 8 13 21 34", "round(time, 3) print('%.3f s' % elapsed) elif time > 0.001: elapsed = time*1000", "76 >>> lucas_sequence(1,1,12) 1 1 2 3 5 8 13 21 34 55", "2 1 3 4 7 11 18 29 47 76 >>> lucas_sequence(1,1,12) 1", "n1: int, n2: int) -> None: L0, L1 = n0, n1 if n2", "lucas_sequence(2308,4261,5) 2308 4261 6569 10830 17399 >>> lucas_sequence(5,-20,6) 5 -20 -15 -35 -50", "if time > 1.0: elapsed = round(time, 3) print('%.3f s' % elapsed) elif", "else: print('Stopwatch not running') def reset(self): self.__start_time = self.__elapsed = 0 self.__running =", "self.__running: self.__start_time = perf_counter() self.__running = True else: print('Stopwatch already running') def stop(self):", "self.__running = True else: print('Stopwatch already running') def stop(self): if self.__running: self.__elapsed +=", "not self.__running: self.__start_time = perf_counter() self.__running = True else: print('Stopwatch already running') def", "17399 >>> lucas_sequence(5,-20,6) 5 -20 -15 -35 -50 -85 >>> lucas_sequence_last(2,1,100) 489526700523968661124 
\"\"\"", "L0, L1 = n0, n1 if n2 >= 1: print(L0, end=\" \") if", "n1 for i in range(0, n2-2): L0, L1 = L1, L0+L1 print(L1, end=\"", "L1 = L1, L0+L1 print(L1, end=\" \") timer.stop() print_time(timer.elapsed()) def test_lucas_sequence(): \"\"\" >>>", "else: print('Stopwatch already running') def stop(self): if self.__running: self.__elapsed += perf_counter()-self.__start_time self.__running =", "must be stopped') return None def print_time(time:float) -> None: print('\\nElapsed: ', end=\"\") if", "1: print(L0, end=\" \") if n2 >= 2: print(L1, end=\" \") for i", ">>> lucas_sequence_last(2,1,100) 489526700523968661124 \"\"\" pass if __name__ == \"__main__\": if(sys.argv[3:]): lucas_sequence(int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3]))", "int, n2: int) -> None: L0, L1 = n0, n1 if n2 >=", "13 21 34 55 89 144 >>> lucas_sequence(2308,4261,5) 2308 4261 6569 10830 17399", "self.reset() def start(self): if not self.__running: self.__start_time = perf_counter() self.__running = True else:", "int, n1: int, n2: int) -> None: L0, L1 = n0, n1 for", "3 5 8 13 21 34 55 89 144 >>> lucas_sequence(2308,4261,5) 2308 4261", "2308 4261 6569 10830 17399 >>> lucas_sequence(5,-20,6) 5 -20 -15 -35 -50 -85", "\") for i in range(0, n2-2): print(L0+L1, end=\" \") L0, L1 = L1,", "timer.stop() print_time(timer.elapsed()) def test_lucas_sequence(): \"\"\" >>> lucas_sequence(0,0,6) 0 0 0 0 0 0", "L1 = L1, L0+L1 print(L1, end=\" \") def lucas_sequence_last_timer(n0: int, n1: int, n2:", "None: timer = Stopwatch() timer.start() L0, L1 = n0, n1 for i in", "None: timer = Stopwatch() timer.start() L0, L1 = n0, n1 if n2 >=", "lucas_sequence(2,1,10) 2 1 3 4 7 11 18 29 47 76 >>> lucas_sequence(1,1,12)", "8 13 21 34 55 89 144 >>> lucas_sequence(2308,4261,5) 2308 4261 6569 10830", "', end=\"\") if time > 1.0: elapsed = round(time, 3) print('%.3f s' %", "def reset(self): self.__start_time = self.__elapsed = 0 self.__running = False def elapsed(self): if", "elapsed = time*1000000 elapsed = 
round(elapsed, 2) print('%.2f µs' % elapsed) def lucas_sequence(n0:", "18 29 47 76 >>> lucas_sequence(1,1,12) 1 1 2 3 5 8 13", "end=\" \") L0, L1 = L1, L0+L1 def lucas_sequence_timer(n0: int, n1: int, n2:", "L(n) = L(n-1) + L(n-2) from time import perf_counter from math import sqrt", "L1, L0+L1 print(L1, end=\" \") timer.stop() print_time(timer.elapsed()) def test_lucas_sequence(): \"\"\" >>> lucas_sequence(0,0,6) 0", "= Stopwatch() timer.start() L0, L1 = n0, n1 if n2 >= 1: print(L0,", "self.__start_time = self.__elapsed = 0 self.__running = False def elapsed(self): if not self.__running:", "int) -> None: L0, L1 = n0, n1 if n2 >= 1: print(L0,", "= round(time, 3) print('%.3f s' % elapsed) elif time > 0.001: elapsed =", "n2-2): L0, L1 = L1, L0+L1 print(L1, end=\" \") def lucas_sequence_last_timer(n0: int, n1:", "timer.stop() print_time(timer.elapsed()) def lucas_sequence_last(n0: int, n1: int, n2: int) -> None: L0, L1", "-20 -15 -35 -50 -85 >>> lucas_sequence_last(2,1,100) 489526700523968661124 \"\"\" pass if __name__ ==", "None: L0, L1 = n0, n1 if n2 >= 1: print(L0, end=\" \")", "print(L1, end=\" \") timer.stop() print_time(timer.elapsed()) def test_lucas_sequence(): \"\"\" >>> lucas_sequence(0,0,6) 0 0 0", "0 self.__running = False def elapsed(self): if not self.__running: return self.__elapsed else: print('Stopwatch", "in range(0, n2-2): L0, L1 = L1, L0+L1 print(L1, end=\" \") def lucas_sequence_last_timer(n0:", "else: elapsed = time*1000000 elapsed = round(elapsed, 2) print('%.2f µs' % elapsed) def", "L is a sequence of numbers # such that L(n) = L(n-1) +", "L1 = L1, L0+L1 def lucas_sequence_timer(n0: int, n1: int, n2: int) -> None:", "from doctest import testmod import sys class Stopwatch: def __init__(self): self.reset() def start(self):", "-50 -85 >>> lucas_sequence_last(2,1,100) 489526700523968661124 \"\"\" pass if __name__ == \"__main__\": if(sys.argv[3:]): lucas_sequence(int(sys.argv[1]),", ">= 2: print(L1, end=\" \") for i in range(0, n2-2): print(L0+L1, 
end=\" \")", "that L(n) = L(n-1) + L(n-2) from time import perf_counter from math import", "print('%.2f ms' % elapsed) else: elapsed = time*1000000 elapsed = round(elapsed, 2) print('%.2f", "perf_counter from math import sqrt from doctest import testmod import sys class Stopwatch:", "6569 10830 17399 >>> lucas_sequence(5,-20,6) 5 -20 -15 -35 -50 -85 >>> lucas_sequence_last(2,1,100)", "2) print('%.2f ms' % elapsed) else: elapsed = time*1000000 elapsed = round(elapsed, 2)", "Stopwatch() timer.start() L0, L1 = n0, n1 for i in range(0, n2-2): L0,", "# such that L(n) = L(n-1) + L(n-2) from time import perf_counter from", "3) print('%.3f s' % elapsed) elif time > 0.001: elapsed = time*1000 elapsed", "elif time > 0.001: elapsed = time*1000 elapsed = round(elapsed, 2) print('%.2f ms'", "from time import perf_counter from math import sqrt from doctest import testmod import", "print(L0, end=\" \") if n2 >= 2: print(L1, end=\" \") for i in", "Stopwatch() timer.start() L0, L1 = n0, n1 if n2 >= 1: print(L0, end=\"", "0 >>> lucas_sequence(2,1,10) 2 1 3 4 7 11 18 29 47 76", "print('Stopwatch already running') def stop(self): if self.__running: self.__elapsed += perf_counter()-self.__start_time self.__running = False", "L0, L1 = L1, L0+L1 print(L1, end=\" \") timer.stop() print_time(timer.elapsed()) def test_lucas_sequence(): \"\"\"", "self.__elapsed += perf_counter()-self.__start_time self.__running = False else: print('Stopwatch not running') def reset(self): self.__start_time", "1.0: elapsed = round(time, 3) print('%.3f s' % elapsed) elif time > 0.001:", "= round(elapsed, 2) print('%.2f µs' % elapsed) def lucas_sequence(n0: int, n1: int, n2:", ">= 1: print(L0, end=\" \") if n2 >= 2: print(L1, end=\" \") for", "L0+L1 print(L1, end=\" \") def lucas_sequence_last_timer(n0: int, n1: int, n2: int) -> None:", "def elapsed(self): if not self.__running: return self.__elapsed else: print('Stopwatch must be stopped') return", "10830 17399 >>> lucas_sequence(5,-20,6) 5 -20 -15 -35 -50 -85 
>>> lucas_sequence_last(2,1,100) 489526700523968661124", "+= perf_counter()-self.__start_time self.__running = False else: print('Stopwatch not running') def reset(self): self.__start_time =", "144 >>> lucas_sequence(2308,4261,5) 2308 4261 6569 10830 17399 >>> lucas_sequence(5,-20,6) 5 -20 -15", "elapsed) def lucas_sequence(n0: int, n1: int, n2: int) -> None: L0, L1 =", "def lucas_sequence(n0: int, n1: int, n2: int) -> None: L0, L1 = n0,", "lucas_sequence(5,-20,6) 5 -20 -15 -35 -50 -85 >>> lucas_sequence_last(2,1,100) 489526700523968661124 \"\"\" pass if", "if n2 >= 1: print(L0, end=\" \") if n2 >= 2: print(L1, end=\"", "n2-2): print(L0+L1, end=\" \") L0, L1 = L1, L0+L1 def lucas_sequence_timer(n0: int, n1:", "5 -20 -15 -35 -50 -85 >>> lucas_sequence_last(2,1,100) 489526700523968661124 \"\"\" pass if __name__", "= time*1000 elapsed = round(elapsed, 2) print('%.2f ms' % elapsed) else: elapsed =", "elapsed(self): if not self.__running: return self.__elapsed else: print('Stopwatch must be stopped') return None", "lucas_sequence(0,0,6) 0 0 0 0 0 0 >>> lucas_sequence(2,1,10) 2 1 3 4", ">>> lucas_sequence(5,-20,6) 5 -20 -15 -35 -50 -85 >>> lucas_sequence_last(2,1,100) 489526700523968661124 \"\"\" pass", "such that L(n) = L(n-1) + L(n-2) from time import perf_counter from math", "55 89 144 >>> lucas_sequence(2308,4261,5) 2308 4261 6569 10830 17399 >>> lucas_sequence(5,-20,6) 5", "int) -> None: L0, L1 = n0, n1 for i in range(0, n2-2):", "lucas_sequence_last(n0: int, n1: int, n2: int) -> None: L0, L1 = n0, n1", "in range(0, n2-2): print(L0+L1, end=\" \") L0, L1 = L1, L0+L1 timer.stop() print_time(timer.elapsed())", "ms' % elapsed) else: elapsed = time*1000000 elapsed = round(elapsed, 2) print('%.2f µs'", "= n0, n1 for i in range(0, n2-2): L0, L1 = L1, L0+L1", "L0, L1 = L1, L0+L1 print(L1, end=\" \") def lucas_sequence_last_timer(n0: int, n1: int,", "= 0 self.__running = False def elapsed(self): if not self.__running: return self.__elapsed else:", "2: print(L1, end=\" \") 
for i in range(0, n2-2): print(L0+L1, end=\" \") L0,", "print_time(timer.elapsed()) def test_lucas_sequence(): \"\"\" >>> lucas_sequence(0,0,6) 0 0 0 0 0 0 >>>", "= L1, L0+L1 timer.stop() print_time(timer.elapsed()) def lucas_sequence_last(n0: int, n1: int, n2: int) ->", "for i in range(0, n2-2): L0, L1 = L1, L0+L1 print(L1, end=\" \")", "34 55 89 144 >>> lucas_sequence(2308,4261,5) 2308 4261 6569 10830 17399 >>> lucas_sequence(5,-20,6)", "end=\" \") L0, L1 = L1, L0+L1 timer.stop() print_time(timer.elapsed()) def lucas_sequence_last(n0: int, n1:", "-> None: timer = Stopwatch() timer.start() L0, L1 = n0, n1 if n2", "> 1.0: elapsed = round(time, 3) print('%.3f s' % elapsed) elif time >", "2 3 5 8 13 21 34 55 89 144 >>> lucas_sequence(2308,4261,5) 2308", "def lucas_sequence_timer(n0: int, n1: int, n2: int) -> None: timer = Stopwatch() timer.start()", "time*1000 elapsed = round(elapsed, 2) print('%.2f ms' % elapsed) else: elapsed = time*1000000", "n1: int, n2: int) -> None: L0, L1 = n0, n1 for i", "class Stopwatch: def __init__(self): self.reset() def start(self): if not self.__running: self.__start_time = perf_counter()", "end=\"\") if time > 1.0: elapsed = round(time, 3) print('%.3f s' % elapsed)", "print('%.2f µs' % elapsed) def lucas_sequence(n0: int, n1: int, n2: int) -> None:", "\") if n2 >= 2: print(L1, end=\" \") for i in range(0, n2-2):", "4 7 11 18 29 47 76 >>> lucas_sequence(1,1,12) 1 1 2 3", "L0, L1 = n0, n1 for i in range(0, n2-2): L0, L1 =", "= True else: print('Stopwatch already running') def stop(self): if self.__running: self.__elapsed += perf_counter()-self.__start_time", "0 0 0 0 0 0 >>> lucas_sequence(2,1,10) 2 1 3 4 7", "stopped') return None def print_time(time:float) -> None: print('\\nElapsed: ', end=\"\") if time >", ">>> lucas_sequence(0,0,6) 0 0 0 0 0 0 >>> lucas_sequence(2,1,10) 2 1 3", "import perf_counter from math import sqrt from doctest import testmod import sys class", "L0, L1 = L1, L0+L1 def lucas_sequence_timer(n0: int, n1: int, n2: 
int) ->", "s' % elapsed) elif time > 0.001: elapsed = time*1000 elapsed = round(elapsed,", "2) print('%.2f µs' % elapsed) def lucas_sequence(n0: int, n1: int, n2: int) ->", "for i in range(0, n2-2): print(L0+L1, end=\" \") L0, L1 = L1, L0+L1", "7 11 18 29 47 76 >>> lucas_sequence(1,1,12) 1 1 2 3 5", "end=\" \") for i in range(0, n2-2): print(L0+L1, end=\" \") L0, L1 =", "i in range(0, n2-2): print(L0+L1, end=\" \") L0, L1 = L1, L0+L1 def", "self.__elapsed else: print('Stopwatch must be stopped') return None def print_time(time:float) -> None: print('\\nElapsed:", "def __init__(self): self.reset() def start(self): if not self.__running: self.__start_time = perf_counter() self.__running =", "reset(self): self.__start_time = self.__elapsed = 0 self.__running = False def elapsed(self): if not", ">>> lucas_sequence(2,1,10) 2 1 3 4 7 11 18 29 47 76 >>>", "return None def print_time(time:float) -> None: print('\\nElapsed: ', end=\"\") if time > 1.0:", "L0+L1 print(L1, end=\" \") timer.stop() print_time(timer.elapsed()) def test_lucas_sequence(): \"\"\" >>> lucas_sequence(0,0,6) 0 0", "elapsed) elif time > 0.001: elapsed = time*1000 elapsed = round(elapsed, 2) print('%.2f", "__init__(self): self.reset() def start(self): if not self.__running: self.__start_time = perf_counter() self.__running = True", "print(L0+L1, end=\" \") L0, L1 = L1, L0+L1 def lucas_sequence_timer(n0: int, n1: int,", "= round(elapsed, 2) print('%.2f ms' % elapsed) else: elapsed = time*1000000 elapsed =", "elapsed) else: elapsed = time*1000000 elapsed = round(elapsed, 2) print('%.2f µs' % elapsed)", "1 2 3 5 8 13 21 34 55 89 144 >>> lucas_sequence(2308,4261,5)", "print(L1, end=\" \") def lucas_sequence_last_timer(n0: int, n1: int, n2: int) -> None: timer", "Lucas Sequence L is a sequence of numbers # such that L(n) =", "5 8 13 21 34 55 89 144 >>> lucas_sequence(2308,4261,5) 2308 4261 6569", "n2-2): L0, L1 = L1, L0+L1 print(L1, end=\" \") timer.stop() print_time(timer.elapsed()) def 
test_lucas_sequence():", "L1 = n0, n1 if n2 >= 1: print(L0, end=\" \") if n2", "range(0, n2-2): L0, L1 = L1, L0+L1 print(L1, end=\" \") timer.stop() print_time(timer.elapsed()) def", "+ L(n-2) from time import perf_counter from math import sqrt from doctest import", "def print_time(time:float) -> None: print('\\nElapsed: ', end=\"\") if time > 1.0: elapsed =", "end=\" \") def lucas_sequence_last_timer(n0: int, n1: int, n2: int) -> None: timer =", "print('Stopwatch must be stopped') return None def print_time(time:float) -> None: print('\\nElapsed: ', end=\"\")", "= L(n-1) + L(n-2) from time import perf_counter from math import sqrt from", "-15 -35 -50 -85 >>> lucas_sequence_last(2,1,100) 489526700523968661124 \"\"\" pass if __name__ == \"__main__\":", "\") timer.stop() print_time(timer.elapsed()) def test_lucas_sequence(): \"\"\" >>> lucas_sequence(0,0,6) 0 0 0 0 0", "n2: int) -> None: L0, L1 = n0, n1 for i in range(0,", "elapsed = round(elapsed, 2) print('%.2f µs' % elapsed) def lucas_sequence(n0: int, n1: int,", "self.__running: return self.__elapsed else: print('Stopwatch must be stopped') return None def print_time(time:float) ->", "-> None: print('\\nElapsed: ', end=\"\") if time > 1.0: elapsed = round(time, 3)", "already running') def stop(self): if self.__running: self.__elapsed += perf_counter()-self.__start_time self.__running = False else:", "def test_lucas_sequence(): \"\"\" >>> lucas_sequence(0,0,6) 0 0 0 0 0 0 >>> lucas_sequence(2,1,10)", "int) -> None: timer = Stopwatch() timer.start() L0, L1 = n0, n1 if", "round(elapsed, 2) print('%.2f ms' % elapsed) else: elapsed = time*1000000 elapsed = round(elapsed,", "-> None: timer = Stopwatch() timer.start() L0, L1 = n0, n1 for i", "L1, L0+L1 def lucas_sequence_timer(n0: int, n1: int, n2: int) -> None: timer =", "-> None: L0, L1 = n0, n1 if n2 >= 1: print(L0, end=\"", "0 0 0 0 >>> lucas_sequence(2,1,10) 2 1 3 4 7 11 18", "in range(0, n2-2): print(L0+L1, end=\" \") L0, L1 = L1, L0+L1 def 
lucas_sequence_timer(n0:", "False def elapsed(self): if not self.__running: return self.__elapsed else: print('Stopwatch must be stopped')", "-85 >>> lucas_sequence_last(2,1,100) 489526700523968661124 \"\"\" pass if __name__ == \"__main__\": if(sys.argv[3:]): lucas_sequence(int(sys.argv[1]), int(sys.argv[2]),", "n2-2): print(L0+L1, end=\" \") L0, L1 = L1, L0+L1 timer.stop() print_time(timer.elapsed()) def lucas_sequence_last(n0:", "end=\" \") timer.stop() print_time(timer.elapsed()) def test_lucas_sequence(): \"\"\" >>> lucas_sequence(0,0,6) 0 0 0 0", "import sys class Stopwatch: def __init__(self): self.reset() def start(self): if not self.__running: self.__start_time", "elapsed = time*1000 elapsed = round(elapsed, 2) print('%.2f ms' % elapsed) else: elapsed", "if not self.__running: return self.__elapsed else: print('Stopwatch must be stopped') return None def", "= self.__elapsed = 0 self.__running = False def elapsed(self): if not self.__running: return", "import sqrt from doctest import testmod import sys class Stopwatch: def __init__(self): self.reset()", "> 0.001: elapsed = time*1000 elapsed = round(elapsed, 2) print('%.2f ms' % elapsed)", "sqrt from doctest import testmod import sys class Stopwatch: def __init__(self): self.reset() def", "time > 1.0: elapsed = round(time, 3) print('%.3f s' % elapsed) elif time", "int) -> None: timer = Stopwatch() timer.start() L0, L1 = n0, n1 for", ">>> lucas_sequence(2308,4261,5) 2308 4261 6569 10830 17399 >>> lucas_sequence(5,-20,6) 5 -20 -15 -35", "print(L1, end=\" \") for i in range(0, n2-2): print(L0+L1, end=\" \") L0, L1", "perf_counter() self.__running = True else: print('Stopwatch already running') def stop(self): if self.__running: self.__elapsed", "1 1 2 3 5 8 13 21 34 55 89 144 >>>", "end=\" \") if n2 >= 2: print(L1, end=\" \") for i in range(0,", "\") def lucas_sequence_last_timer(n0: int, n1: int, n2: int) -> None: timer = Stopwatch()", "numbers # such that L(n) = L(n-1) + L(n-2) from time import perf_counter", 
"if self.__running: self.__elapsed += perf_counter()-self.__start_time self.__running = False else: print('Stopwatch not running') def", "range(0, n2-2): print(L0+L1, end=\" \") L0, L1 = L1, L0+L1 timer.stop() print_time(timer.elapsed()) def", "i in range(0, n2-2): print(L0+L1, end=\" \") L0, L1 = L1, L0+L1 timer.stop()", "int, n2: int) -> None: L0, L1 = n0, n1 for i in", "n2: int) -> None: timer = Stopwatch() timer.start() L0, L1 = n0, n1", "self.__running = False def elapsed(self): if not self.__running: return self.__elapsed else: print('Stopwatch must", "489526700523968661124 \"\"\" pass if __name__ == \"__main__\": if(sys.argv[3:]): lucas_sequence(int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3])) else: testmod()", "timer.start() L0, L1 = n0, n1 for i in range(0, n2-2): L0, L1", "\"\"\" >>> lucas_sequence(0,0,6) 0 0 0 0 0 0 >>> lucas_sequence(2,1,10) 2 1", "0 0 0 >>> lucas_sequence(2,1,10) 2 1 3 4 7 11 18 29", "be stopped') return None def print_time(time:float) -> None: print('\\nElapsed: ', end=\"\") if time", "running') def stop(self): if self.__running: self.__elapsed += perf_counter()-self.__start_time self.__running = False else: print('Stopwatch", "0 0 0 0 0 >>> lucas_sequence(2,1,10) 2 1 3 4 7 11", "print(L0+L1, end=\" \") L0, L1 = L1, L0+L1 timer.stop() print_time(timer.elapsed()) def lucas_sequence_last(n0: int,", "n2 >= 1: print(L0, end=\" \") if n2 >= 2: print(L1, end=\" \")", "-> None: L0, L1 = n0, n1 for i in range(0, n2-2): L0,", "time import perf_counter from math import sqrt from doctest import testmod import sys", "def stop(self): if self.__running: self.__elapsed += perf_counter()-self.__start_time self.__running = False else: print('Stopwatch not", "False else: print('Stopwatch not running') def reset(self): self.__start_time = self.__elapsed = 0 self.__running", "lucas_sequence_last_timer(n0: int, n1: int, n2: int) -> None: timer = Stopwatch() timer.start() L0,", "stop(self): if self.__running: self.__elapsed += 
perf_counter()-self.__start_time self.__running = False else: print('Stopwatch not running')", "not self.__running: return self.__elapsed else: print('Stopwatch must be stopped') return None def print_time(time:float)", "21 34 55 89 144 >>> lucas_sequence(2308,4261,5) 2308 4261 6569 10830 17399 >>>", "int, n1: int, n2: int) -> None: timer = Stopwatch() timer.start() L0, L1", "timer = Stopwatch() timer.start() L0, L1 = n0, n1 for i in range(0,", "None def print_time(time:float) -> None: print('\\nElapsed: ', end=\"\") if time > 1.0: elapsed", "= L1, L0+L1 print(L1, end=\" \") timer.stop() print_time(timer.elapsed()) def test_lucas_sequence(): \"\"\" >>> lucas_sequence(0,0,6)", "0 0 >>> lucas_sequence(2,1,10) 2 1 3 4 7 11 18 29 47", "self.__elapsed = 0 self.__running = False def elapsed(self): if not self.__running: return self.__elapsed", "self.__running = False else: print('Stopwatch not running') def reset(self): self.__start_time = self.__elapsed =", "L0, L1 = L1, L0+L1 timer.stop() print_time(timer.elapsed()) def lucas_sequence_last(n0: int, n1: int, n2:", "n0, n1 for i in range(0, n2-2): L0, L1 = L1, L0+L1 print(L1,", "L1 = L1, L0+L1 timer.stop() print_time(timer.elapsed()) def lucas_sequence_last(n0: int, n1: int, n2: int)", "self.__running: self.__elapsed += perf_counter()-self.__start_time self.__running = False else: print('Stopwatch not running') def reset(self):", "-35 -50 -85 >>> lucas_sequence_last(2,1,100) 489526700523968661124 \"\"\" pass if __name__ == \"__main__\": if(sys.argv[3:]):", "a sequence of numbers # such that L(n) = L(n-1) + L(n-2) from", "elapsed = round(time, 3) print('%.3f s' % elapsed) elif time > 0.001: elapsed", "self.__start_time = perf_counter() self.__running = True else: print('Stopwatch already running') def stop(self): if", "sequence of numbers # such that L(n) = L(n-1) + L(n-2) from time", "L(n-2) from time import perf_counter from math import sqrt from doctest import testmod", "import testmod import sys class Stopwatch: def 
__init__(self): self.reset() def start(self): if not", "time*1000000 elapsed = round(elapsed, 2) print('%.2f µs' % elapsed) def lucas_sequence(n0: int, n1:", "= perf_counter() self.__running = True else: print('Stopwatch already running') def stop(self): if self.__running:", "L1 = n0, n1 for i in range(0, n2-2): L0, L1 = L1,", "n0, n1 if n2 >= 1: print(L0, end=\" \") if n2 >= 2:", "if n2 >= 2: print(L1, end=\" \") for i in range(0, n2-2): print(L0+L1,", "int, n1: int, n2: int) -> None: L0, L1 = n0, n1 if", "% elapsed) def lucas_sequence(n0: int, n1: int, n2: int) -> None: L0, L1", "def lucas_sequence_last(n0: int, n1: int, n2: int) -> None: L0, L1 = n0,", "89 144 >>> lucas_sequence(2308,4261,5) 2308 4261 6569 10830 17399 >>> lucas_sequence(5,-20,6) 5 -20", "Sequence L is a sequence of numbers # such that L(n) = L(n-1)", "n2 >= 2: print(L1, end=\" \") for i in range(0, n2-2): print(L0+L1, end=\"", "Stopwatch: def __init__(self): self.reset() def start(self): if not self.__running: self.__start_time = perf_counter() self.__running", "print('\\nElapsed: ', end=\"\") if time > 1.0: elapsed = round(time, 3) print('%.3f s'", "# Lucas Sequence L is a sequence of numbers # such that L(n)", "is a sequence of numbers # such that L(n) = L(n-1) + L(n-2)", "math import sqrt from doctest import testmod import sys class Stopwatch: def __init__(self):", "lucas_sequence(1,1,12) 1 1 2 3 5 8 13 21 34 55 89 144", "True else: print('Stopwatch already running') def stop(self): if self.__running: self.__elapsed += perf_counter()-self.__start_time self.__running", "not running') def reset(self): self.__start_time = self.__elapsed = 0 self.__running = False def", "29 47 76 >>> lucas_sequence(1,1,12) 1 1 2 3 5 8 13 21", "timer = Stopwatch() timer.start() L0, L1 = n0, n1 if n2 >= 1:", "\") L0, L1 = L1, L0+L1 timer.stop() print_time(timer.elapsed()) def lucas_sequence_last(n0: int, n1: int,", "start(self): if not self.__running: self.__start_time = perf_counter() self.__running = True else: 
print('Stopwatch already", "int, n2: int) -> None: timer = Stopwatch() timer.start() L0, L1 = n0,", "= time*1000000 elapsed = round(elapsed, 2) print('%.2f µs' % elapsed) def lucas_sequence(n0: int,", "% elapsed) else: elapsed = time*1000000 elapsed = round(elapsed, 2) print('%.2f µs' %", "print_time(timer.elapsed()) def lucas_sequence_last(n0: int, n1: int, n2: int) -> None: L0, L1 =" ]
[ "= prevAdjusted # Then edit the time offset value adjustedTime = int(fields[0])+timeOffset #", "= self.calibrate, text = \"Calibrate\") calibrateButton.pack(side = LEFT) Button(self.x, command = partial(self.singlePlot,True), text", "TOP) Label(b, text = \"Slope\", padx = 15).pack(side = TOP) Entry(b, textvariable =", "xrange(counter,len(content)-1): ### Starting on the line number found from previous loop y =", "Necessary formatting for InField compatibility fileLog.write(\"DM_TestTitle=\" + n) fileLog.write(str(self.file_path) + n + 'Program", "list of sensors if b.checkVar.get(): ### if box is checked p = Process(target", "stringFormat = \"{} \\t {}\" def __init__(self,ip,port,master): ###### Tkinter Varibales self.x = Frame()", "= data.decode('utf-8') lineSplit = packetSplit.split('\\n') for line in lineSplit: fields = line.split(',') calibratedData", "15).grid(column = 0,row = 0) Label(c, text = \"Torque (in-lbs)\", padx = 15).grid(column", "LEFT) b = Frame(t) b.pack(side = RIGHT) c = Frame(a) c.pack(side = TOP)", "text = \"Add Point\").pack(side = LEFT,fill = X, padx = 10) Button(master =", "= \"Add Bridge\", command = self.addBridge, width = 18) self.bridgeButton.pack(side = LEFT) self.bridgeRemove", "Entry(b, textvariable = self.yIntercept).pack(side = TOP) Button(b, command = partial(self.exitWindow,t), text = \"OK\").pack(side", "= 18) self.bridgeButton.pack(side = LEFT) self.bridgeRemove = Button(self.bottomFrame, text = \"Remove Bridge\", command", "self.createWidgets() def createWidgets(self) : x = Entry(self, textvariable = self.bitValue, width = 8)", "textvariable = self.yIntercept).pack(side = TOP) Button(b, command = partial(self.exitWindow,t), text = \"OK\").pack(side =", "# UDP sock.bind((self.ip, (self.portVar.get()))) ### File Setup fileLog = open(self.filePathVar.get(), \"wb\") ### Necessary", "fileLog.write(\"DM_AxisLabel.Dim1=[]\" + n) fileLog.write(\"DM_AxisLabel.Dim2=Time\" + n) fileLog.write(\"DM_AxisUnits.Dim1=[]\" + n) 
fileLog.write(\"DM_AxisUnits.Dim2=us\" + n) fileLog.write(\"DM_LogicalChan=2\"", "output files are formatted to be imported into the InField data analysis software.", "8, bd = 2, relief = GROOVE) self.topFrame.pack(side = TOP, fill = X)", "LEFT, fill = X, padx = 10) Button(master = b, command = partial(self.linReg),", "__init__(self, master): self.bitValue = DoubleVar(0) self.torqueValue = DoubleVar(0) Frame.__init__(self,master) self.pack(side = TOP) self.createWidgets()", "for i in xrange(3): self.addPoint(a) else: tempList = self.pointList # Store points in", "{}\" def __init__(self,ip,port,master): ###### Tkinter Varibales self.x = Frame() self.isLogging = BooleanVar() self.checkVar", "= GROOVE) self.bottomFrame.pack(side = BOTTOM, fill = X) self.bridgeButton = Button(self.bottomFrame, text =", "\"+ str(x.torqueValue.get())) # Finds slope and y intercept from calibration point cloud def", "7/16/2016 # Description: The program reads wirelessly transmitted data from multiple sensors and", "= \"Y Intercept\", padx = 15).pack(side = TOP) Entry(b, textvariable = self.yIntercept).pack(side =", "logging for all selected bridges def startLogging(self): if not tkMessageBox.askyesno(\"Start Logging\",\"Are you sure?\\nFiles", "+ chr(10) + \"\" class SensorNetwork(Frame): def __init__(self,master=None): ### List of process objects", "fileLog.write(\"DM_LogicalChan=1\" + n) fileLog.write(\"DM_ChanType=SEQUENTIAL\" + n) fileLog.write(\"DM_ChanName=1\" + n) fileLog.write(\"DM_NumDims=2\" + n) fileLog.write(\"DM_DataMode=1\"", "self.bridges = [] ###### Tkinter variables self.ip = StringVar() self.ip.set(\"0.0.0.0\") self.isLogging = BooleanVar()", "+ 'Program Start Time: ' + str(datetime.datetime.now()) + n) fileLog.write(\"Calibration Values: Slope =", "b.pack(side = RIGHT) c = Frame(a) c.pack(side = TOP) d = Frame(a) d.pack(side", "partial(self.linReg), text = \"Calibrate!\").pack(side = TOP) Label(b, text = \"Slope\", padx = 15).pack(side", "checked p = Process(target = 
b.startLogging) self.processes.append(p) p.start() def stopLogging(self): print (\"Stopping Data", "for line in lineSplit: fields = line.split(',') calibratedData = round(self.mSlope.get()*float(fields[1]) + self.yIntercept.get(),1) if", "check = Checkbutton(self.x,text = \"Include\",variable = self.checkVar) check.pack(side=LEFT) L1 = Label(self.x, text =", "on Port: ' + str(self.portVar.get())) self.isLogging.set(True) ### Network Connection sock = socket.socket(socket.AF_INET, #", "only def printEntry(self): for x in self.pointList: print(\"bit value: \"+ str(x.bitValue.get())) print(\"Torque Value", "if len(self.pointList) == 0: # If the list of calibration points is empty", "[] ###### Tkinter variables self.ip = StringVar() self.ip.set(\"0.0.0.0\") self.isLogging = BooleanVar() ###### GUI", "the processor clock has overflowed timeOffset = prevAdjusted # Then edit the time", "Empty out list for x in tempList: # Copy points over temp =", "as plt from multiprocessing import Process n = chr(13) + chr(10) + \"\"", "= chr(13) + chr(10) + \"\" class SensorNetwork(Frame): def __init__(self,master=None): ### List of", "= X, padx = 10) Button(master = d, command = partial(self.removePoint), text =", "printEntry(self): for x in self.pointList: print(\"bit value: \"+ str(x.bitValue.get())) print(\"Torque Value value: \"+", "writes them to \".txt\" files. # The output files are formatted to be", "Entry(self, textvariable = self.torqueValue, width = 8) y.pack(side = LEFT, padx = 10,", "command = partial(self.linReg), text = \"Calibrate!\").pack(side = TOP) Label(b, text = \"Slope\", padx", "= X) self.startButton = Button(self.topFrame, text = \"Start Logging\", command = self.startLogging, width", "fileLog.write(\"DM_AxisUnits.Dim1=[]\" + n) fileLog.write(\"DM_AxisUnits.Dim2=in-lb\" + n) fileLog.write(\"DM_Start=\" + n) isFirst = True ###", "list of calibration points is empty for i in xrange(3): self.addPoint(a) else: tempList", "the InField data analysis software. 
import socket import serial # import Serial Library", "Open window t.wm_title(\"PORT: \" + str(self.portVar.get()) + \" Calibration\") a = Frame(t) a.pack(side", "command = self.stopLogging, width = 18, state = DISABLED) self.stopButton.pack(side = RIGHT) Button(self.topFrame,", "str(self.portVar.get()) + \" Calibration\") a = Frame(t) a.pack(side = LEFT) b = Frame(t)", "= 0) if len(self.pointList) == 0: # If the list of calibration points", "= DISABLED) for p in self.processes: ### Iterate through list of process objects", "Value\", padx = 15).grid(column = 0,row = 0) Label(c, text = \"Torque (in-lbs)\",", "Show the plot plt.xlabel(\"Time (microseconds)\") plt.ylabel(\"Torque (inch-pounds)\") plt.title(\"Time vs. Torque\") plt.show() class Bridge():", "process p.join() def plotMultiple(self): for b in self.bridges: if b.checkVar.get(): xy = b.singlePlot(False)", "Interface Initialization Frame.__init__(self.x,master, bd = 2, padx = 3, pady = 3) self.x.pack(side=LEFT)", "= 10) Button(master = d, command = partial(self.removePoint), text = \"Remove Point\").pack(side =", "# Open window t.wm_title(\"PORT: \" + str(self.portVar.get()) + \" Calibration\") a = Frame(t)", "### List of process objects for parallel computing self.processes = [] ### List", "1024 bytes packetSplit = data.decode('utf-8') lineSplit = packetSplit.split('\\n') for line in lineSplit: fields", "10, pady = 2) y = Entry(self, textvariable = self.torqueValue, width = 8)", "# Import numpy import sys import os from Tkinter import * import tkMessageBox", "of calibration points is empty for i in xrange(3): self.addPoint(a) else: tempList =", "+ n) fileLog.write(\"DM_LogicalChan=1\" + n) fileLog.write(\"DM_ChanType=SEQUENTIAL\" + n) fileLog.write(\"DM_ChanName=1\" + n) fileLog.write(\"DM_NumDims=2\" +", "+ n) isFirst = True ### Boolean to track start time (time offset)", "packetSplit = data.decode('utf-8') lineSplit = packetSplit.split('\\n') for line in lineSplit: fields = line.split(',')", "analysis 
software. import socket import serial # import Serial Library from numpy import", "prevAdjusted = adjustedTime def createWidgets(self): check = Checkbutton(self.x,text = \"Include\",variable = self.checkVar) check.pack(side=LEFT)", "start time (time offset) timeOffset = 0 prevTime = 0 prevAdjusted = 0", "### Boolean to track start time (time offset) timeOffset = 0 prevTime =", "= IntVar() self.portVar = IntVar() self.portVar.set(port) self.filePathVar = StringVar() ###### Variables self.ip =", "previous loop y = content[x].split(\"\\t\") time.append(y[0]) torque.append(y[1]) if show: plt.plot(time,torque) plt.xlabel(\"Time (microseconds)\") plt.ylabel(\"Torque", "singlePlot method for each instance plt.plot(xy[0],xy[1]) ### Show the plot plt.xlabel(\"Time (microseconds)\") plt.ylabel(\"Torque", "self.yIntercept.set(y) # Exits a top level window def exitWindow(self, frame): frame.withdraw() def reopenWindow(self,", "\" + str(self.mSlope.get()) + \", Y-Intercept = \" + str(self.yIntercept.get()) + n) fileLog.write(\"DM_Operator=\"", "= \"Add Point\").pack(side = LEFT,fill = X, padx = 10) Button(master = d,", "isFirst = True ### Boolean to track start time (time offset) timeOffset =", "+ 1 if line.find(\"DM_Start=\") != -1: break for x in xrange(counter,len(content)-1): ### Starting", "= [] ###### Tkinter variables self.ip = StringVar() self.ip.set(\"0.0.0.0\") self.isLogging = BooleanVar() ######", "x in tempList: # Copy points over temp = calibrationPoint(a) temp.bitValue.set(x.bitValue.get()) temp.torqueValue.set(x.torqueValue.get()) self.pointList.append(temp)", "Initialize with one bridge Label(self.bottomFrame, text = \"Neapco Components LLC: <NAME>\\t2016\", font =", "0 self.bitPoints = [] self.torquePoints = [] self.isFirstCalib = True self.bitEntryList = []", "\"Add Point\").pack(side = LEFT,fill = X, padx = 10) Button(master = d, command", "Button(self.bottomFrame, text = \"Add Bridge\", command = self.addBridge, width = 18) self.bridgeButton.pack(side =", 
"Internet socket.SOCK_DGRAM) # UDP sock.bind((self.ip, (self.portVar.get()))) ### File Setup fileLog = open(self.filePathVar.get(), \"wb\")", "= TOP) # Pack it to the top of the window self.bridges.append(a) #", "fileLog.write(\"{:.6f}\".format(float(adjustedTime)/1000000) + '\\t' + str(calibratedData) + n) prevTime = int(fields[0]) prevAdjusted = adjustedTime", "processor clock has overflowed timeOffset = prevAdjusted # Then edit the time offset", "def addPoint(self, frame): x = calibrationPoint(frame) self.pointList.append(x) def removePoint(self): self.pointList.pop().pack_forget() # Used for", "SensorNetwork(Frame): def __init__(self,master=None): ### List of process objects for parallel computing self.processes =", "= 8, pady = 8, bd = 2, relief = GROOVE) self.bottomFrame.pack(side =", "### Loop through list of sensors if b.checkVar.get(): ### if box is checked", "= Frame() self.isLogging = BooleanVar() self.checkVar = IntVar() self.portVar = IntVar() self.portVar.set(port) self.filePathVar", "= \" + str(self.mSlope.get()) + \", Y-Intercept = \" + str(self.yIntercept.get()) + n)", "in xrange(3): self.addPoint(a) else: tempList = self.pointList # Store points in temporary list", "= 18, state = DISABLED) self.stopButton.pack(side = RIGHT) Button(self.topFrame, text = \"Multi-Plot\", command", "y = content[x].split(\"\\t\") time.append(y[0]) torque.append(y[1]) if show: plt.plot(time,torque) plt.xlabel(\"Time (microseconds)\") plt.ylabel(\"Torque (inch-pounds)\") plt.title(\"Time", "width = 18) self.bridgeButton.pack(side = LEFT) self.bridgeRemove = Button(self.bottomFrame, text = \"Remove Bridge\",", "w = linalg.lstsq(A.T,B.T)[0] m = round(float(w[0]),3) y = round(float(w[1]),3) self.mSlope.set(m) self.yIntercept.set(y) # Exits", "= Checkbutton(self.x,text = \"Include\",variable = self.checkVar) check.pack(side=LEFT) L1 = Label(self.x, text = \"", "process objects for parallel computing self.processes = [] ### List of bridges self.bridges", "= \"Stop Logging\", 
command = self.stopLogging, width = 18, state = DISABLED) self.stopButton.pack(side", "self.addBridge() # Initialize with one bridge Label(self.bottomFrame, text = \"Neapco Components LLC: <NAME>\\t2016\",", "Button(self.x, command = self.calibrate, text = \"Calibrate\") calibrateButton.pack(side = LEFT) Button(self.x, command =", "on the very first packet and store it in to timeOffset isFirst =", "torque = [] counter = 0 for line in content: ### Find which", "prevTime = int(fields[0]) prevAdjusted = adjustedTime def createWidgets(self): check = Checkbutton(self.x,text = \"Include\",variable", "= 15).grid(column = 2, row = 0) if len(self.pointList) == 0: # If", "self.addPoint(a) else: tempList = self.pointList # Store points in temporary list self.pointList =", "plt.ylabel(\"Torque (inch-pounds)\") plt.title(\"Time vs. Torque\") plt.show() return (time,torque) def saveAs(self): print 'Please Select", "array, ones, linalg # Import numpy import sys import os from Tkinter import", "= LEFT) b = Frame(t) b.pack(side = RIGHT) c = Frame(a) c.pack(side =", "self.topFrame.pack(side = TOP, fill = X) self.startButton = Button(self.topFrame, text = \"Start Logging\",", "fill = X) self.startButton = Button(self.topFrame, text = \"Start Logging\", command = self.startLogging,", "a.pack(side = LEFT) b = Frame(t) b.pack(side = RIGHT) c = Frame(a) c.pack(side", "ones, linalg # Import numpy import sys import os from Tkinter import *", "15).pack(side = TOP) Entry(b, textvariable = self.mSlope).pack(side = TOP) Label(b, text = \"Y", "Usage: The program simultaneously reads multiple UDP streams and writes them to \".txt\"", "# Date: 7/16/2016 # Description: The program reads wirelessly transmitted data from multiple", "array([temp1,ones(len(temp1))]) B = array([temp2]) w = linalg.lstsq(A.T,B.T)[0] m = round(float(w[0]),3) y = round(float(w[1]),3)", "\"wb\") ### Necessary formatting for InField compatibility fileLog.write(\"DM_TestTitle=\" + n) fileLog.write(str(self.file_path) + n", 
"Variables self.ip = ip ###### Interface Initialization Frame.__init__(self.x,master, bd = 2, padx =", "Description: The program reads wirelessly transmitted data from multiple sensors and saves it", "= Button(self.bottomFrame, text = \"Add Bridge\", command = self.addBridge, width = 18) self.bridgeButton.pack(side", "= self.startLogging, width = 18) self.startButton.pack(side = LEFT) self.stopButton = Button(self.topFrame, text =", "and writes them to \".txt\" files. # The output files are formatted to", "plt from multiprocessing import Process n = chr(13) + chr(10) + \"\" class", "Date: 7/16/2016 # Description: The program reads wirelessly transmitted data from multiple sensors", "found from previous loop y = content[x].split(\"\\t\") time.append(y[0]) torque.append(y[1]) if show: plt.plot(time,torque) plt.xlabel(\"Time", "= 0,row = 0) Label(c, text = \"Torque (in-lbs)\", padx = 15).grid(column =", "NORMAL) for b in self.bridges: ### Loop through list of sensors if b.checkVar.get():", "plot plt.xlabel(\"Time (microseconds)\") plt.ylabel(\"Torque (inch-pounds)\") plt.title(\"Time vs. Torque\") plt.show() class Bridge(): stringFormat =", "<NAME>\\t2016\", font = (\"Helvetica\",\"12\")).pack(side = BOTTOM) def addBridge(self): a = Bridge(self.ip.get(),0, master=self) #", "the local SD card. 
# Usage: The program simultaneously reads multiple UDP streams", "* import tkMessageBox from tkFileDialog import asksaveasfilename import datetime import thread from functools", "Button(self.topFrame, text = \"Multi-Plot\", command = self.plotMultiple).pack(side = LEFT) self.bottomFrame = Frame(master=self,padx =", "0 for line in content: ### Find which line the data starts on", "self.isFirstCalib = True self.bitEntryList = [] self.torqueEntryList = [] self.pointList = [] ####", "points is empty for i in xrange(3): self.addPoint(a) else: tempList = self.pointList #", "it in to timeOffset isFirst = False if(int(fields[0]) < prevTime): # If the", "\" PORT\") L1.pack(side=LEFT) portEntry = Entry(self.x, width = 5, textvariable = self.portVar) portEntry.pack(side=LEFT)", "Button(self.topFrame, text = \"Stop Logging\", command = self.stopLogging, width = 18, state =", "= Entry(self, textvariable = self.torqueValue, width = 8) y.pack(side = LEFT, padx =", "Button(self.bottomFrame, text = \"Remove Bridge\", command = self.removeBridge, width = 18) self.bridgeRemove.pack(side =", "= \"Calibrate\") calibrateButton.pack(side = LEFT) Button(self.x, command = partial(self.singlePlot,True), text = \"Plot\").pack(side =", "padx = 10) Button(master = b, command = partial(self.linReg), text = \"Calibrate!\").pack(side =", "chr(10) + \"\" class SensorNetwork(Frame): def __init__(self,master=None): ### List of process objects for", "Intercept\", padx = 15).pack(side = TOP) Entry(b, textvariable = self.yIntercept).pack(side = TOP) Button(b,", "Copy points over temp = calibrationPoint(a) temp.bitValue.set(x.bitValue.get()) temp.torqueValue.set(x.torqueValue.get()) self.pointList.append(temp) Button(master = d, command", "calibration point cloud def linReg(self): temp1 = [] temp2 = [] for x", "fill = X, padx = 10) Button(master = b, command = partial(self.linReg), text", "text = \"OK\").pack(side = BOTTOM) def singlePlot(self, show): f = open(self.filePathVar.get()) content =", 
"self.ip = StringVar() self.ip.set(\"0.0.0.0\") self.isLogging = BooleanVar() ###### GUI Initialization Frame.__init__(self,master, bd =", "2) return ###### Running code if __name__ == '__main__': root = Tk() root.wm_title(\"Gage", "self.bitValue = DoubleVar(0) self.torqueValue = DoubleVar(0) Frame.__init__(self,master) self.pack(side = TOP) self.createWidgets() def createWidgets(self)", "Connection sock = socket.socket(socket.AF_INET, # Internet socket.SOCK_DGRAM) # UDP sock.bind((self.ip, (self.portVar.get()))) ### File", "fileLog.write(\"DM_Operator=\" + n) fileLog.write(\"DM_NumLogChans=2\" + n) fileLog.write(\"DM_NumDataModes=1\" + n) fileLog.write(\"DM_LogicalChan=1\" + n) fileLog.write(\"DM_ChanType=SEQUENTIAL\"", "code if __name__ == '__main__': root = Tk() root.wm_title(\"Gage Logger\") app = SensorNetwork(master=root)", "if line.find(\"DM_Start=\") != -1: break for x in xrange(counter,len(content)-1): ### Starting on the", "Loop through list of sensors if b.checkVar.get(): ### if box is checked p", "fileLog.write(\"DM_AxisUnits.Dim1=[]\" + n) fileLog.write(\"DM_AxisUnits.Dim2=us\" + n) fileLog.write(\"DM_LogicalChan=2\" + n) fileLog.write(\"DM_ChanType=SEQUENTIAL\" + n) fileLog.write(\"DM_ChanName=1\"", "BOTTOM) def singlePlot(self, show): f = open(self.filePathVar.get()) content = f.readlines() time = []", "TOP) #self.wm_title(\"Feather Receiver\") self.topFrame = Frame(master=self, padx = 8,pady = 8, bd =", "n) fileLog.write(\"DM_NumLogChans=2\" + n) fileLog.write(\"DM_NumDataModes=1\" + n) fileLog.write(\"DM_LogicalChan=1\" + n) fileLog.write(\"DM_ChanType=SEQUENTIAL\" + n)", "row = 0) if len(self.pointList) == 0: # If the list of calibration", "line the data starts on counter = counter + 1 if line.find(\"DM_Start=\") !=", "if show: plt.plot(time,torque) plt.xlabel(\"Time (microseconds)\") plt.ylabel(\"Torque (inch-pounds)\") plt.title(\"Time vs. 
Torque\") plt.show() return (time,torque)", "True self.bitEntryList = [] self.torqueEntryList = [] self.pointList = [] #### Starts Writing", "X) self.bridgeButton = Button(self.bottomFrame, text = \"Add Bridge\", command = self.addBridge, width =", "+ n) fileLog.write(\"DM_AxisLabel.Dim1=[]\" + n) fileLog.write(\"DM_AxisLabel.Dim2=Torque\" + n) fileLog.write(\"DM_AxisUnits.Dim1=[]\" + n) fileLog.write(\"DM_AxisUnits.Dim2=in-lb\" +", "\" File\") L1.pack(side=LEFT) fileEntry = Entry(self.x,width = 35,textvariable = self.filePathVar, text = self.filePathVar.get())", "self.pointList = [] # Empty out list for x in tempList: # Copy", "= round(float(w[1]),3) self.mSlope.set(m) self.yIntercept.set(y) # Exits a top level window def exitWindow(self, frame):", "addr = sock.recvfrom(1024) # buffer size is 1024 bytes packetSplit = data.decode('utf-8') lineSplit", "plt.ylabel(\"Torque (inch-pounds)\") plt.title(\"Time vs. Torque\") plt.show() class Bridge(): stringFormat = \"{} \\t {}\"", "Button(master = d, command = partial(self.addPoint,a), text = \"Add Point\").pack(side = LEFT,fill =", "Exits a top level window def exitWindow(self, frame): frame.withdraw() def reopenWindow(self, frame): frame.update()", "b in self.bridges: if b.checkVar.get(): xy = b.singlePlot(False) ### Call the singlePlot method", "calibrateButton = Button(self.x, command = self.calibrate, text = \"Calibrate\") calibrateButton.pack(side = LEFT) Button(self.x,", "= LEFT) self.bottomFrame = Frame(master=self,padx = 8, pady = 8, bd = 2,", "File\") L1.pack(side=LEFT) fileEntry = Entry(self.x,width = 35,textvariable = self.filePathVar, text = self.filePathVar.get()) fileEntry.pack(side=LEFT)", "= f.readlines() time = [] torque = [] counter = 0 for line", "= socket.socket(socket.AF_INET, # Internet socket.SOCK_DGRAM) # UDP sock.bind((self.ip, (self.portVar.get()))) ### File Setup fileLog", "def startLogging(self): if not tkMessageBox.askyesno(\"Start Logging\",\"Are you sure?\\nFiles may be overwritten\"): 
return self.startButton.configure(state", "10, pady = 2) return ###### Running code if __name__ == '__main__': root", "into the InField data analysis software. import socket import serial # import Serial", "#### Starts Writing to File def startLogging(self): print('Sampling system on Port: ' +", "TOP) Entry(b, textvariable = self.yIntercept).pack(side = TOP) Button(b, command = partial(self.exitWindow,t), text =", "if not tkMessageBox.askyesno(\"Start Logging\",\"Are you sure?\\nFiles may be overwritten\"): return self.startButton.configure(state = DISABLED)", "tkMessageBox from tkFileDialog import asksaveasfilename import datetime import thread from functools import partial", "n) fileLog.write(\"DM_LogicalChan=1\" + n) fileLog.write(\"DM_ChanType=SEQUENTIAL\" + n) fileLog.write(\"DM_ChanName=1\" + n) fileLog.write(\"DM_NumDims=2\" + n)", "command = self.addBridge, width = 18) self.bridgeButton.pack(side = LEFT) self.bridgeRemove = Button(self.bottomFrame, text", "(self.portVar.get()))) ### File Setup fileLog = open(self.filePathVar.get(), \"wb\") ### Necessary formatting for InField", "0: # If the list of calibration points is empty for i in", "points in temporary list self.pointList = [] # Empty out list for x", "self.saveAs, text = \"Browse...\") browseButton.pack(side = LEFT) calibrateButton = Button(self.x, command = self.calibrate,", "15).pack(side = TOP) Entry(b, textvariable = self.yIntercept).pack(side = TOP) Button(b, command = partial(self.exitWindow,t),", "starts logging for all selected bridges def startLogging(self): if not tkMessageBox.askyesno(\"Start Logging\",\"Are you", "= StringVar() self.ip.set(\"0.0.0.0\") self.isLogging = BooleanVar() ###### GUI Initialization Frame.__init__(self,master, bd = 10)", "self.pointCount = 0 self.bitPoints = [] self.torquePoints = [] self.isFirstCalib = True self.bitEntryList", "text = \"Neapco Components LLC: <NAME>\\t2016\", font = (\"Helvetica\",\"12\")).pack(side = BOTTOM) def addBridge(self):", 
"self.yIntercept.get(),1) if isFirst: timeOffset = (-1)*int(fields[0]) # Take the time on the very", "tkFileDialog import asksaveasfilename import datetime import thread from functools import partial import matplotlib.pyplot", "numpy import array, ones, linalg # Import numpy import sys import os from", "vs. Torque\") plt.show() class Bridge(): stringFormat = \"{} \\t {}\" def __init__(self,ip,port,master): ######", "= X, padx = 10) Button(master = b, command = partial(self.linReg), text =", "font = (\"Helvetica\",\"12\")).pack(side = BOTTOM) def addBridge(self): a = Bridge(self.ip.get(),0, master=self) # Create", "= LEFT) self.stopButton = Button(self.topFrame, text = \"Stop Logging\", command = self.stopLogging, width", "GUI Initialization Frame.__init__(self,master, bd = 10) self.pack(side = TOP) #self.wm_title(\"Feather Receiver\") self.topFrame =", "reopenWindow(self, frame): frame.update() frame.reiconify() class calibrationPoint(Frame) : def __init__(self, master): self.bitValue = DoubleVar(0)", "TOP) Button(b, command = partial(self.exitWindow,t), text = \"OK\").pack(side = BOTTOM) def singlePlot(self, show):", "n) fileLog.write(\"DM_ChanName=1\" + n) fileLog.write(\"DM_NumDims=2\" + n) fileLog.write(\"DM_DataMode=1\" + n) fileLog.write(\"DM_DataModeType=TIMHIS\" + n)", "in lineSplit: fields = line.split(',') calibratedData = round(self.mSlope.get()*float(fields[1]) + self.yIntercept.get(),1) if isFirst: timeOffset", "str(datetime.datetime.now()) + n) fileLog.write(\"Calibration Values: Slope = \" + str(self.mSlope.get()) + \", Y-Intercept", "Simultaneously starts logging for all selected bridges def startLogging(self): if not tkMessageBox.askyesno(\"Start Logging\",\"Are", "= 0 while True: ### Read packets until told to stop data, addr", "from multiple sensors and saves it on the local SD card. 
# Usage:", "= DoubleVar(0) self.pointCount = 0 self.bitPoints = [] self.torquePoints = [] self.isFirstCalib =", "width = 18) self.startButton.pack(side = LEFT) self.stopButton = Button(self.topFrame, text = \"Stop Logging\",", "lineSplit = packetSplit.split('\\n') for line in lineSplit: fields = line.split(',') calibratedData = round(self.mSlope.get()*float(fields[1])", "If the list of calibration points is empty for i in xrange(3): self.addPoint(a)", "self.bridgeButton = Button(self.bottomFrame, text = \"Add Bridge\", command = self.addBridge, width = 18)", "break for x in xrange(counter,len(content)-1): ### Starting on the line number found from", "plt.plot(time,torque) plt.xlabel(\"Time (microseconds)\") plt.ylabel(\"Torque (inch-pounds)\") plt.title(\"Time vs. Torque\") plt.show() return (time,torque) def saveAs(self):", "reads wirelessly transmitted data from multiple sensors and saves it on the local", "by the timeOffset fileLog.write(\"{:.6f}\".format(float(adjustedTime)/1000000) + '\\t' + str(calibratedData) + n) prevTime = int(fields[0])", "File Setup fileLog = open(self.filePathVar.get(), \"wb\") ### Necessary formatting for InField compatibility fileLog.write(\"DM_TestTitle=\"", "Library from numpy import array, ones, linalg # Import numpy import sys import", "value: \"+ str(x.torqueValue.get())) # Finds slope and y intercept from calibration point cloud", "command = partial(self.addPoint,a), text = \"Add Point\").pack(side = LEFT,fill = X, padx =", "temp.bitValue.set(x.bitValue.get()) temp.torqueValue.set(x.torqueValue.get()) self.pointList.append(temp) Button(master = d, command = partial(self.addPoint,a), text = \"Add Point\").pack(side", "import matplotlib.pyplot as plt from multiprocessing import Process n = chr(13) + chr(10)", "str(calibratedData) + n) prevTime = int(fields[0]) prevAdjusted = adjustedTime def createWidgets(self): check =", "= X) self.bridgeButton = Button(self.bottomFrame, text = \"Add Bridge\", command = self.addBridge, width", 
"Boolean to track start time (time offset) timeOffset = 0 prevTime = 0", "str(self.portVar.get())) self.isLogging.set(True) ### Network Connection sock = socket.socket(socket.AF_INET, # Internet socket.SOCK_DGRAM) # UDP", "= Frame(t) a.pack(side = LEFT) b = Frame(t) b.pack(side = RIGHT) c =", "packetSplit.split('\\n') for line in lineSplit: fields = line.split(',') calibratedData = round(self.mSlope.get()*float(fields[1]) + self.yIntercept.get(),1)", "self.bridges.pop().x.pack_forget() ### Simultaneously starts logging for all selected bridges def startLogging(self): if not", "text = \"Add Bridge\", command = self.addBridge, width = 18) self.bridgeButton.pack(side = LEFT)", "Entry(b, textvariable = self.mSlope).pack(side = TOP) Label(b, text = \"Y Intercept\", padx =", "and saves it on the local SD card. # Usage: The program simultaneously", "text = \"Calibrate!\").pack(side = TOP) Label(b, text = \"Slope\", padx = 15).pack(side =", "+ n) fileLog.write(\"DM_ChanName=1\" + n) fileLog.write(\"DM_NumDims=2\" + n) fileLog.write(\"DM_DataMode=1\" + n) fileLog.write(\"DM_DataModeType=TIMHIS\" +", "in self.bridges: ### Loop through list of sensors if b.checkVar.get(): ### if box", "text = \"Calibrate\") calibrateButton.pack(side = LEFT) Button(self.x, command = partial(self.singlePlot,True), text = \"Plot\").pack(side", "and store it in to timeOffset isFirst = False if(int(fields[0]) < prevTime): #", "streams and writes them to \".txt\" files. 
# The output files are formatted", "GROOVE) self.bottomFrame.pack(side = BOTTOM, fill = X) self.bridgeButton = Button(self.bottomFrame, text = \"Add", "= \"Plot\").pack(side = LEFT) def calibrate(self): #if len(self.pointList) is not 0: t =", "== 0: # If the list of calibration points is empty for i", "startLogging(self): print('Sampling system on Port: ' + str(self.portVar.get())) self.isLogging.set(True) ### Network Connection sock", "width = 18) self.bridgeRemove.pack(side = RIGHT) self.addBridge() # Initialize with one bridge Label(self.bottomFrame,", "= TOP) self.createWidgets() def createWidgets(self) : x = Entry(self, textvariable = self.bitValue, width", "+ n) fileLog.write(\"DM_AxisUnits.Dim1=[]\" + n) fileLog.write(\"DM_AxisUnits.Dim2=in-lb\" + n) fileLog.write(\"DM_Start=\" + n) isFirst =", "By: <NAME> # Date: 7/16/2016 # Description: The program reads wirelessly transmitted data", "+ '\\t' + str(calibratedData) + n) prevTime = int(fields[0]) prevAdjusted = adjustedTime def", "thread from functools import partial import matplotlib.pyplot as plt from multiprocessing import Process", "= 0 prevTime = 0 prevAdjusted = 0 while True: ### Read packets", "removeBridge(self): self.bridges.pop().x.pack_forget() ### Simultaneously starts logging for all selected bridges def startLogging(self): if", "self.addBridge, width = 18) self.bridgeButton.pack(side = LEFT) self.bridgeRemove = Button(self.bottomFrame, text = \"Remove", "Data Collection\") self.startButton.configure(state = NORMAL) self.stopButton.configure(state = DISABLED) for p in self.processes: ###", "the window self.bridges.append(a) # Add the object to self.bridges def removeBridge(self): self.bridges.pop().x.pack_forget() ###", "for debugging only def printEntry(self): for x in self.pointList: print(\"bit value: \"+ str(x.bitValue.get()))", "calibrateButton.pack(side = LEFT) Button(self.x, command = partial(self.singlePlot,True), text = \"Plot\").pack(side = LEFT) def", "in temporary list self.pointList 
= [] # Empty out list for x in", "DoubleVar(0) Frame.__init__(self,master) self.pack(side = TOP) self.createWidgets() def createWidgets(self) : x = Entry(self, textvariable", "[] # Empty out list for x in tempList: # Copy points over", "Tkinter import * import tkMessageBox from tkFileDialog import asksaveasfilename import datetime import thread", "Label(c, text = \"Bit Value\", padx = 15).grid(column = 0,row = 0) Label(c,", "len(self.pointList) == 0: # If the list of calibration points is empty for", "fileLog.write(str(self.file_path) + n + 'Program Start Time: ' + str(datetime.datetime.now()) + n) fileLog.write(\"Calibration", "= self.saveAs, text = \"Browse...\") browseButton.pack(side = LEFT) calibrateButton = Button(self.x, command =", "variables self.ip = StringVar() self.ip.set(\"0.0.0.0\") self.isLogging = BooleanVar() ###### GUI Initialization Frame.__init__(self,master, bd", "for b in self.bridges: ### Loop through list of sensors if b.checkVar.get(): ###", "import partial import matplotlib.pyplot as plt from multiprocessing import Process n = chr(13)", "def saveAs(self): print 'Please Select File:' self.file_path = asksaveasfilename(defaultextension = '.txt', filetypes =", "system on Port: ' + str(self.portVar.get())) self.isLogging.set(True) ### Network Connection sock = socket.socket(socket.AF_INET,", "Torque\") plt.show() class Bridge(): stringFormat = \"{} \\t {}\" def __init__(self,ip,port,master): ###### Tkinter", "Running code if __name__ == '__main__': root = Tk() root.wm_title(\"Gage Logger\") app =", "imported into the InField data analysis software. 
import socket import serial # import", "< prevTime): # If the processor clock has overflowed timeOffset = prevAdjusted #", "18) self.bridgeRemove.pack(side = RIGHT) self.addBridge() # Initialize with one bridge Label(self.bottomFrame, text =", "\"Torque (in-lbs)\", padx = 15).grid(column = 2, row = 0) if len(self.pointList) ==", "plt.xlabel(\"Time (microseconds)\") plt.ylabel(\"Torque (inch-pounds)\") plt.title(\"Time vs. Torque\") plt.show() return (time,torque) def saveAs(self): print", "time on the very first packet and store it in to timeOffset isFirst", "open(self.filePathVar.get(), \"wb\") ### Necessary formatting for InField compatibility fileLog.write(\"DM_TestTitle=\" + n) fileLog.write(str(self.file_path) +", "filetypes = [('Text Files','.txt')]) self.filePathVar.set(self.file_path) def addPoint(self, frame): x = calibrationPoint(frame) self.pointList.append(x) def", "StringVar() ###### Variables self.ip = ip ###### Interface Initialization Frame.__init__(self.x,master, bd = 2,", "= 0 prevAdjusted = 0 while True: ### Read packets until told to", "show: plt.plot(time,torque) plt.xlabel(\"Time (microseconds)\") plt.ylabel(\"Torque (inch-pounds)\") plt.title(\"Time vs. 
Torque\") plt.show() return (time,torque) def", "= self.plotMultiple).pack(side = LEFT) self.bottomFrame = Frame(master=self,padx = 8, pady = 8, bd", "return ###### Running code if __name__ == '__main__': root = Tk() root.wm_title(\"Gage Logger\")", "buffer size is 1024 bytes packetSplit = data.decode('utf-8') lineSplit = packetSplit.split('\\n') for line", "\"+ str(x.bitValue.get())) print(\"Torque Value value: \"+ str(x.torqueValue.get())) # Finds slope and y intercept", "= [] self.isFirstCalib = True self.bitEntryList = [] self.torqueEntryList = [] self.pointList =", "timeOffset = (-1)*int(fields[0]) # Take the time on the very first packet and", "def removeBridge(self): self.bridges.pop().x.pack_forget() ### Simultaneously starts logging for all selected bridges def startLogging(self):", "Frame.__init__(self.x,master, bd = 2, padx = 3, pady = 3) self.x.pack(side=LEFT) self.createWidgets() ######", "calibratedData = round(self.mSlope.get()*float(fields[1]) + self.yIntercept.get(),1) if isFirst: timeOffset = (-1)*int(fields[0]) # Take the", "= Button(self.bottomFrame, text = \"Remove Bridge\", command = self.removeBridge, width = 18) self.bridgeRemove.pack(side", "= 2) y = Entry(self, textvariable = self.torqueValue, width = 8) y.pack(side =", "fileLog.write(\"DM_DataMode=1\" + n) fileLog.write(\"DM_DataModeType=TIMHIS\" + n) fileLog.write(\"DM_AxisLabel.Dim1=[]\" + n) fileLog.write(\"DM_AxisLabel.Dim2=Time\" + n) fileLog.write(\"DM_AxisUnits.Dim1=[]\"", "pady = 8, bd = 2, relief = GROOVE) self.bottomFrame.pack(side = BOTTOM, fill", "socket import serial # import Serial Library from numpy import array, ones, linalg", "new bridge object a.x.pack(side = TOP) # Pack it to the top of", "= 0) Label(c, text = \"Torque (in-lbs)\", padx = 15).grid(column = 2, row", "def singlePlot(self, show): f = open(self.filePathVar.get()) content = f.readlines() time = [] torque", "data analysis software. 
import socket import serial # import Serial Library from numpy", "Add the object to self.bridges def removeBridge(self): self.bridges.pop().x.pack_forget() ### Simultaneously starts logging for", "0 prevTime = 0 prevAdjusted = 0 while True: ### Read packets until", "\"Remove Bridge\", command = self.removeBridge, width = 18) self.bridgeRemove.pack(side = RIGHT) self.addBridge() #", "= adjustedTime def createWidgets(self): check = Checkbutton(self.x,text = \"Include\",variable = self.checkVar) check.pack(side=LEFT) L1", "self.yIntercept = DoubleVar(0) self.pointCount = 0 self.bitPoints = [] self.torquePoints = [] self.isFirstCalib", "\"Plot\").pack(side = LEFT) def calibrate(self): #if len(self.pointList) is not 0: t = Toplevel(self.x)", "BooleanVar() ###### GUI Initialization Frame.__init__(self,master, bd = 10) self.pack(side = TOP) #self.wm_title(\"Feather Receiver\")", "Point\").pack(side = LEFT, fill = X, padx = 10) Button(master = b, command", "for x in self.pointList: temp1.append(x.bitValue.get()) temp2.append(x.torqueValue.get()) A = array([temp1,ones(len(temp1))]) B = array([temp2]) w", "Iterate through list of process objects p.terminate() ### Terminate each process p.join() def", "m = round(float(w[0]),3) y = round(float(w[1]),3) self.mSlope.set(m) self.yIntercept.set(y) # Exits a top level", "+ n) fileLog.write(\"DM_ChanType=SEQUENTIAL\" + n) fileLog.write(\"DM_ChanName=1\" + n) fileLog.write(\"DM_NumDims=2\" + n) fileLog.write(\"DM_DataMode=1\" +", "self.torqueEntryList = [] self.pointList = [] #### Starts Writing to File def startLogging(self):", "prevAdjusted # Then edit the time offset value adjustedTime = int(fields[0])+timeOffset # Shift", "Entry(self.x, width = 5, textvariable = self.portVar) portEntry.pack(side=LEFT) L1 = Label(self.x, text =", "packet and store it in to timeOffset isFirst = False if(int(fields[0]) < prevTime):", "self.pack(side = TOP) #self.wm_title(\"Feather Receiver\") self.topFrame = Frame(master=self, padx = 8,pady = 8,", 
"<NAME> # Date: 7/16/2016 # Description: The program reads wirelessly transmitted data from", "\"Start Logging\", command = self.startLogging, width = 18) self.startButton.pack(side = LEFT) self.stopButton =", "List of process objects for parallel computing self.processes = [] ### List of", "from calibration point cloud def linReg(self): temp1 = [] temp2 = [] for", "the line number found from previous loop y = content[x].split(\"\\t\") time.append(y[0]) torque.append(y[1]) if", "top level window def exitWindow(self, frame): frame.withdraw() def reopenWindow(self, frame): frame.update() frame.reiconify() class", "plt.show() class Bridge(): stringFormat = \"{} \\t {}\" def __init__(self,ip,port,master): ###### Tkinter Varibales", "check.pack(side=LEFT) L1 = Label(self.x, text = \" PORT\") L1.pack(side=LEFT) portEntry = Entry(self.x, width", "len(self.pointList) is not 0: t = Toplevel(self.x) # Open window t.wm_title(\"PORT: \" +", "Calibration\") a = Frame(t) a.pack(side = LEFT) b = Frame(t) b.pack(side = RIGHT)", "= [] self.torqueEntryList = [] self.pointList = [] #### Starts Writing to File", "= True self.bitEntryList = [] self.torqueEntryList = [] self.pointList = [] #### Starts", "1 if line.find(\"DM_Start=\") != -1: break for x in xrange(counter,len(content)-1): ### Starting on", "master=self) # Create new bridge object a.x.pack(side = TOP) # Pack it to", "def plotMultiple(self): for b in self.bridges: if b.checkVar.get(): xy = b.singlePlot(False) ### Call", "LEFT) self.bridgeRemove = Button(self.bottomFrame, text = \"Remove Bridge\", command = self.removeBridge, width =", "self.removeBridge, width = 18) self.bridgeRemove.pack(side = RIGHT) self.addBridge() # Initialize with one bridge", "0,row = 0) Label(c, text = \"Torque (in-lbs)\", padx = 15).grid(column = 2,", "return (time,torque) def saveAs(self): print 'Please Select File:' self.file_path = asksaveasfilename(defaultextension = '.txt',", "round(float(w[0]),3) y = round(float(w[1]),3) self.mSlope.set(m) 
self.yIntercept.set(y) # Exits a top level window def", "fileEntry = Entry(self.x,width = 35,textvariable = self.filePathVar, text = self.filePathVar.get()) fileEntry.pack(side=LEFT) browseButton =", "+ n) fileLog.write(\"DM_NumLogChans=2\" + n) fileLog.write(\"DM_NumDataModes=1\" + n) fileLog.write(\"DM_LogicalChan=1\" + n) fileLog.write(\"DM_ChanType=SEQUENTIAL\" +", "time offset value adjustedTime = int(fields[0])+timeOffset # Shift every subsequent packet by the", "LEFT) self.bottomFrame = Frame(master=self,padx = 8, pady = 8, bd = 2, relief", "data starts on counter = counter + 1 if line.find(\"DM_Start=\") != -1: break", "one bridge Label(self.bottomFrame, text = \"Neapco Components LLC: <NAME>\\t2016\", font = (\"Helvetica\",\"12\")).pack(side =", "content: ### Find which line the data starts on counter = counter +", "[] self.isFirstCalib = True self.bitEntryList = [] self.torqueEntryList = [] self.pointList = []", "X, padx = 10) Button(master = b, command = partial(self.linReg), text = \"Calibrate!\").pack(side", "while True: ### Read packets until told to stop data, addr = sock.recvfrom(1024)", "temp.torqueValue.set(x.torqueValue.get()) self.pointList.append(temp) Button(master = d, command = partial(self.addPoint,a), text = \"Add Point\").pack(side =", "\"Add Bridge\", command = self.addBridge, width = 18) self.bridgeButton.pack(side = LEFT) self.bridgeRemove =", ": x = Entry(self, textvariable = self.bitValue, width = 8) x.pack(side = LEFT,", "else: tempList = self.pointList # Store points in temporary list self.pointList = []", "IntVar() self.portVar = IntVar() self.portVar.set(port) self.filePathVar = StringVar() ###### Variables self.ip = ip", "self.startButton.configure(state = NORMAL) self.stopButton.configure(state = DISABLED) for p in self.processes: ### Iterate through", "= True ### Boolean to track start time (time offset) timeOffset = 0", "def addBridge(self): a = Bridge(self.ip.get(),0, master=self) # Create new bridge object a.x.pack(side =", 
"fill = X) self.bridgeButton = Button(self.bottomFrame, text = \"Add Bridge\", command = self.addBridge,", "formatted to be imported into the InField data analysis software. import socket import", "Start Time: ' + str(datetime.datetime.now()) + n) fileLog.write(\"Calibration Values: Slope = \" +", "= BOTTOM) Label(c, text = \"Bit Value\", padx = 15).grid(column = 0,row =", "Bridge(): stringFormat = \"{} \\t {}\" def __init__(self,ip,port,master): ###### Tkinter Varibales self.x =", "simultaneously reads multiple UDP streams and writes them to \".txt\" files. # The", "BooleanVar() self.checkVar = IntVar() self.portVar = IntVar() self.portVar.set(port) self.filePathVar = StringVar() ###### Variables", "\" + str(self.yIntercept.get()) + n) fileLog.write(\"DM_Operator=\" + n) fileLog.write(\"DM_NumLogChans=2\" + n) fileLog.write(\"DM_NumDataModes=1\" +", "in to timeOffset isFirst = False if(int(fields[0]) < prevTime): # If the processor", "very first packet and store it in to timeOffset isFirst = False if(int(fields[0])", "the singlePlot method for each instance plt.plot(xy[0],xy[1]) ### Show the plot plt.xlabel(\"Time (microseconds)\")", "+ \"\" class SensorNetwork(Frame): def __init__(self,master=None): ### List of process objects for parallel", "text = \"Remove Point\").pack(side = LEFT, fill = X, padx = 10) Button(master", "track start time (time offset) timeOffset = 0 prevTime = 0 prevAdjusted =", "[] self.torqueEntryList = [] self.pointList = [] #### Starts Writing to File def", "18) self.bridgeButton.pack(side = LEFT) self.bridgeRemove = Button(self.bottomFrame, text = \"Remove Bridge\", command =", "bridges def startLogging(self): if not tkMessageBox.askyesno(\"Start Logging\",\"Are you sure?\\nFiles may be overwritten\"): return", "# Created By: <NAME> # Date: 7/16/2016 # Description: The program reads wirelessly", "Components LLC: <NAME>\\t2016\", font = (\"Helvetica\",\"12\")).pack(side = BOTTOM) def addBridge(self): a = Bridge(self.ip.get(),0,", "2, relief 
= GROOVE) self.bottomFrame.pack(side = BOTTOM, fill = X) self.bridgeButton = Button(self.bottomFrame,", "def createWidgets(self): check = Checkbutton(self.x,text = \"Include\",variable = self.checkVar) check.pack(side=LEFT) L1 = Label(self.x,", "= self.portVar) portEntry.pack(side=LEFT) L1 = Label(self.x, text = \" File\") L1.pack(side=LEFT) fileEntry =", "+ n) fileLog.write(\"Calibration Values: Slope = \" + str(self.mSlope.get()) + \", Y-Intercept =", "are formatted to be imported into the InField data analysis software. import socket", "= [] counter = 0 for line in content: ### Find which line", "import sys import os from Tkinter import * import tkMessageBox from tkFileDialog import", "10) Button(master = b, command = partial(self.linReg), text = \"Calibrate!\").pack(side = TOP) Label(b,", "import array, ones, linalg # Import numpy import sys import os from Tkinter", "= partial(self.addPoint,a), text = \"Add Point\").pack(side = LEFT,fill = X, padx = 10)", "3) self.x.pack(side=LEFT) self.createWidgets() ###### linear Calibration coefficents ###### Calibrated = mSlope * Uncalibrated", "= Button(self.x, command = self.calibrate, text = \"Calibrate\") calibrateButton.pack(side = LEFT) Button(self.x, command", "command = partial(self.removePoint), text = \"Remove Point\").pack(side = LEFT, fill = X, padx", "= NORMAL) for b in self.bridges: ### Loop through list of sensors if", "= DoubleVar(0) Frame.__init__(self,master) self.pack(side = TOP) self.createWidgets() def createWidgets(self) : x = Entry(self,", "Logging\", command = self.startLogging, width = 18) self.startButton.pack(side = LEFT) self.stopButton = Button(self.topFrame,", "counter + 1 if line.find(\"DM_Start=\") != -1: break for x in xrange(counter,len(content)-1): ###", "= TOP) Label(b, text = \"Slope\", padx = 15).pack(side = TOP) Entry(b, textvariable", "line in content: ### Find which line the data starts on counter =", "False if(int(fields[0]) < prevTime): # If the processor clock has overflowed 
timeOffset =", "command = self.startLogging, width = 18) self.startButton.pack(side = LEFT) self.stopButton = Button(self.topFrame, text", "x = Entry(self, textvariable = self.bitValue, width = 8) x.pack(side = LEFT, padx", "Initialization Frame.__init__(self,master, bd = 10) self.pack(side = TOP) #self.wm_title(\"Feather Receiver\") self.topFrame = Frame(master=self,", "time (time offset) timeOffset = 0 prevTime = 0 prevAdjusted = 0 while", "+ n) fileLog.write(\"DM_DataMode=1\" + n) fileLog.write(\"DM_DataModeType=TIMHIS\" + n) fileLog.write(\"DM_AxisLabel.Dim1=[]\" + n) fileLog.write(\"DM_AxisLabel.Dim2=Time\" +", "padx = 10) Button(master = d, command = partial(self.removePoint), text = \"Remove Point\").pack(side", "self.isLogging = BooleanVar() self.checkVar = IntVar() self.portVar = IntVar() self.portVar.set(port) self.filePathVar = StringVar()", "self.createWidgets() ###### linear Calibration coefficents ###### Calibrated = mSlope * Uncalibrated + yIntercept", "computing self.processes = [] ### List of bridges self.bridges = [] ###### Tkinter", "self.stopButton.pack(side = RIGHT) Button(self.topFrame, text = \"Multi-Plot\", command = self.plotMultiple).pack(side = LEFT) self.bottomFrame", "chr(13) + chr(10) + \"\" class SensorNetwork(Frame): def __init__(self,master=None): ### List of process", "import Serial Library from numpy import array, ones, linalg # Import numpy import", "n) fileLog.write(\"DM_DataMode=1\" + n) fileLog.write(\"DM_DataModeType=TIMHIS\" + n) fileLog.write(\"DM_AxisLabel.Dim1=[]\" + n) fileLog.write(\"DM_AxisLabel.Dim2=Time\" + n)", "self.filePathVar.get()) fileEntry.pack(side=LEFT) browseButton = Button(self.x, command = self.saveAs, text = \"Browse...\") browseButton.pack(side =", "objects p.terminate() ### Terminate each process p.join() def plotMultiple(self): for b in self.bridges:", "Y-Intercept = \" + str(self.yIntercept.get()) + n) fileLog.write(\"DM_Operator=\" + n) fileLog.write(\"DM_NumLogChans=2\" + n)", "= 2, padx = 3, pady 
= 3) self.x.pack(side=LEFT) self.createWidgets() ###### linear Calibration", "# Take the time on the very first packet and store it in", "Tkinter variables self.ip = StringVar() self.ip.set(\"0.0.0.0\") self.isLogging = BooleanVar() ###### GUI Initialization Frame.__init__(self,master,", "fileLog.write(\"DM_DataModeType=TIMHIS\" + n) fileLog.write(\"DM_AxisLabel.Dim1=[]\" + n) fileLog.write(\"DM_AxisLabel.Dim2=Time\" + n) fileLog.write(\"DM_AxisUnits.Dim1=[]\" + n) fileLog.write(\"DM_AxisUnits.Dim2=us\"", "= Button(self.topFrame, text = \"Start Logging\", command = self.startLogging, width = 18) self.startButton.pack(side", "8) x.pack(side = LEFT, padx = 10, pady = 2) y = Entry(self,", "self.bottomFrame.pack(side = BOTTOM, fill = X) self.bridgeButton = Button(self.bottomFrame, text = \"Add Bridge\",", "saveAs(self): print 'Please Select File:' self.file_path = asksaveasfilename(defaultextension = '.txt', filetypes = [('Text", "= 2) return ###### Running code if __name__ == '__main__': root = Tk()", "overwritten\"): return self.startButton.configure(state = DISABLED) self.stopButton.configure(state = NORMAL) for b in self.bridges: ###", "list for x in tempList: # Copy points over temp = calibrationPoint(a) temp.bitValue.set(x.bitValue.get())", "= TOP) #self.wm_title(\"Feather Receiver\") self.topFrame = Frame(master=self, padx = 8,pady = 8, bd", "Receiver\") self.topFrame = Frame(master=self, padx = 8,pady = 8, bd = 2, relief", "SD card. 
# Usage: The program simultaneously reads multiple UDP streams and writes", "= Button(self.topFrame, text = \"Stop Logging\", command = self.stopLogging, width = 18, state", "b, command = partial(self.linReg), text = \"Calibrate!\").pack(side = TOP) Label(b, text = \"Slope\",", "= sock.recvfrom(1024) # buffer size is 1024 bytes packetSplit = data.decode('utf-8') lineSplit =", "import Process n = chr(13) + chr(10) + \"\" class SensorNetwork(Frame): def __init__(self,master=None):", "sock = socket.socket(socket.AF_INET, # Internet socket.SOCK_DGRAM) # UDP sock.bind((self.ip, (self.portVar.get()))) ### File Setup", "self.mSlope).pack(side = TOP) Label(b, text = \"Y Intercept\", padx = 15).pack(side = TOP)", "def printEntry(self): for x in self.pointList: print(\"bit value: \"+ str(x.bitValue.get())) print(\"Torque Value value:", "= TOP) d = Frame(a) d.pack(side = BOTTOM) Label(c, text = \"Bit Value\",", "= NORMAL) self.stopButton.configure(state = DISABLED) for p in self.processes: ### Iterate through list", "os from Tkinter import * import tkMessageBox from tkFileDialog import asksaveasfilename import datetime", "saves it on the local SD card. # Usage: The program simultaneously reads", "sensors if b.checkVar.get(): ### if box is checked p = Process(target = b.startLogging)", "adjustedTime def createWidgets(self): check = Checkbutton(self.x,text = \"Include\",variable = self.checkVar) check.pack(side=LEFT) L1 =", "f.readlines() time = [] torque = [] counter = 0 for line in", "(microseconds)\") plt.ylabel(\"Torque (inch-pounds)\") plt.title(\"Time vs. 
Torque\") plt.show() return (time,torque) def saveAs(self): print 'Please", "= Bridge(self.ip.get(),0, master=self) # Create new bridge object a.x.pack(side = TOP) # Pack", "###### Variables self.ip = ip ###### Interface Initialization Frame.__init__(self.x,master, bd = 2, padx", "fileLog.write(\"DM_AxisLabel.Dim2=Torque\" + n) fileLog.write(\"DM_AxisUnits.Dim1=[]\" + n) fileLog.write(\"DM_AxisUnits.Dim2=in-lb\" + n) fileLog.write(\"DM_Start=\" + n) isFirst", "window self.bridges.append(a) # Add the object to self.bridges def removeBridge(self): self.bridges.pop().x.pack_forget() ### Simultaneously", "size is 1024 bytes packetSplit = data.decode('utf-8') lineSplit = packetSplit.split('\\n') for line in", "0) if len(self.pointList) == 0: # If the list of calibration points is", "2, padx = 3, pady = 3) self.x.pack(side=LEFT) self.createWidgets() ###### linear Calibration coefficents", "command = self.calibrate, text = \"Calibrate\") calibrateButton.pack(side = LEFT) Button(self.x, command = partial(self.singlePlot,True),", "if b.checkVar.get(): ### if box is checked p = Process(target = b.startLogging) self.processes.append(p)", "NORMAL) self.stopButton.configure(state = DISABLED) for p in self.processes: ### Iterate through list of", "X, padx = 10) Button(master = d, command = partial(self.removePoint), text = \"Remove", "= (\"Helvetica\",\"12\")).pack(side = BOTTOM) def addBridge(self): a = Bridge(self.ip.get(),0, master=self) # Create new", "= \"Browse...\") browseButton.pack(side = LEFT) calibrateButton = Button(self.x, command = self.calibrate, text =", "Varibales self.x = Frame() self.isLogging = BooleanVar() self.checkVar = IntVar() self.portVar = IntVar()", "self.pointList: temp1.append(x.bitValue.get()) temp2.append(x.torqueValue.get()) A = array([temp1,ones(len(temp1))]) B = array([temp2]) w = linalg.lstsq(A.T,B.T)[0] m", "= Frame(master=self, padx = 8,pady = 8, bd = 2, relief = GROOVE)", "InField compatibility fileLog.write(\"DM_TestTitle=\" + n) 
fileLog.write(str(self.file_path) + n + 'Program Start Time: '", "Point\").pack(side = LEFT,fill = X, padx = 10) Button(master = d, command =", "n) fileLog.write(\"DM_DataModeType=TIMHIS\" + n) fileLog.write(\"DM_AxisLabel.Dim1=[]\" + n) fileLog.write(\"DM_AxisLabel.Dim2=Time\" + n) fileLog.write(\"DM_AxisUnits.Dim1=[]\" + n)", "counter = 0 for line in content: ### Find which line the data", "0 while True: ### Read packets until told to stop data, addr =", "LEFT) self.stopButton = Button(self.topFrame, text = \"Stop Logging\", command = self.stopLogging, width =", "Shift every subsequent packet by the timeOffset fileLog.write(\"{:.6f}\".format(float(adjustedTime)/1000000) + '\\t' + str(calibratedData) +", "' + str(datetime.datetime.now()) + n) fileLog.write(\"Calibration Values: Slope = \" + str(self.mSlope.get()) +", "\"Y Intercept\", padx = 15).pack(side = TOP) Entry(b, textvariable = self.yIntercept).pack(side = TOP)", "Frame(t) b.pack(side = RIGHT) c = Frame(a) c.pack(side = TOP) d = Frame(a)", "# Store points in temporary list self.pointList = [] # Empty out list", "+ n) fileLog.write(\"DM_Operator=\" + n) fileLog.write(\"DM_NumLogChans=2\" + n) fileLog.write(\"DM_NumDataModes=1\" + n) fileLog.write(\"DM_LogicalChan=1\" +", "n) fileLog.write(\"DM_NumDims=2\" + n) fileLog.write(\"DM_DataMode=1\" + n) fileLog.write(\"DM_DataModeType=TIMHIS\" + n) fileLog.write(\"DM_AxisLabel.Dim1=[]\" + n)", "self.bitPoints = [] self.torquePoints = [] self.isFirstCalib = True self.bitEntryList = [] self.torqueEntryList", "= calibrationPoint(frame) self.pointList.append(x) def removePoint(self): self.pointList.pop().pack_forget() # Used for debugging only def printEntry(self):", "y = Entry(self, textvariable = self.torqueValue, width = 8) y.pack(side = LEFT, padx", "local SD card. 
# Usage: The program simultaneously reads multiple UDP streams and", "of process objects for parallel computing self.processes = [] ### List of bridges", "fileLog = open(self.filePathVar.get(), \"wb\") ### Necessary formatting for InField compatibility fileLog.write(\"DM_TestTitle=\" + n)", "def __init__(self,master=None): ### List of process objects for parallel computing self.processes = []", "# Copy points over temp = calibrationPoint(a) temp.bitValue.set(x.bitValue.get()) temp.torqueValue.set(x.torqueValue.get()) self.pointList.append(temp) Button(master = d,", "### Network Connection sock = socket.socket(socket.AF_INET, # Internet socket.SOCK_DGRAM) # UDP sock.bind((self.ip, (self.portVar.get())))", "###### GUI Initialization Frame.__init__(self,master, bd = 10) self.pack(side = TOP) #self.wm_title(\"Feather Receiver\") self.topFrame", "Value value: \"+ str(x.torqueValue.get())) # Finds slope and y intercept from calibration point", "n) fileLog.write(\"DM_ChanType=SEQUENTIAL\" + n) fileLog.write(\"DM_ChanName=1\" + n) fileLog.write(\"DM_NumDims=2\" + n) fileLog.write(\"DM_DataMode=1\" + n)", "startLogging(self): if not tkMessageBox.askyesno(\"Start Logging\",\"Are you sure?\\nFiles may be overwritten\"): return self.startButton.configure(state =", "InField data analysis software. 
import socket import serial # import Serial Library from", "= self.filePathVar, text = self.filePathVar.get()) fileEntry.pack(side=LEFT) browseButton = Button(self.x, command = self.saveAs, text", "adjustedTime = int(fields[0])+timeOffset # Shift every subsequent packet by the timeOffset fileLog.write(\"{:.6f}\".format(float(adjustedTime)/1000000) +", "array([temp2]) w = linalg.lstsq(A.T,B.T)[0] m = round(float(w[0]),3) y = round(float(w[1]),3) self.mSlope.set(m) self.yIntercept.set(y) #", "= [] for x in self.pointList: temp1.append(x.bitValue.get()) temp2.append(x.torqueValue.get()) A = array([temp1,ones(len(temp1))]) B =", "Calibration coefficents ###### Calibrated = mSlope * Uncalibrated + yIntercept self.mSlope = DoubleVar()", "self.bridges def removeBridge(self): self.bridges.pop().x.pack_forget() ### Simultaneously starts logging for all selected bridges def", "= content[x].split(\"\\t\") time.append(y[0]) torque.append(y[1]) if show: plt.plot(time,torque) plt.xlabel(\"Time (microseconds)\") plt.ylabel(\"Torque (inch-pounds)\") plt.title(\"Time vs.", "n) fileLog.write(\"DM_LogicalChan=2\" + n) fileLog.write(\"DM_ChanType=SEQUENTIAL\" + n) fileLog.write(\"DM_ChanName=1\" + n) fileLog.write(\"DM_NumDims=2\" + n)", "def exitWindow(self, frame): frame.withdraw() def reopenWindow(self, frame): frame.update() frame.reiconify() class calibrationPoint(Frame) : def", "Create new bridge object a.x.pack(side = TOP) # Pack it to the top", "files. 
# The output files are formatted to be imported into the InField", "= 8) x.pack(side = LEFT, padx = 10, pady = 2) y =", "+ str(datetime.datetime.now()) + n) fileLog.write(\"Calibration Values: Slope = \" + str(self.mSlope.get()) + \",", "= partial(self.removePoint), text = \"Remove Point\").pack(side = LEFT, fill = X, padx =", "portEntry = Entry(self.x, width = 5, textvariable = self.portVar) portEntry.pack(side=LEFT) L1 = Label(self.x,", "= LEFT, fill = X, padx = 10) Button(master = b, command =", "= [] torque = [] counter = 0 for line in content: ###", "= BOTTOM) def addBridge(self): a = Bridge(self.ip.get(),0, master=self) # Create new bridge object", "2, row = 0) if len(self.pointList) == 0: # If the list of", "n) fileLog.write(\"DM_AxisUnits.Dim2=in-lb\" + n) fileLog.write(\"DM_Start=\" + n) isFirst = True ### Boolean to", "fileLog.write(\"DM_AxisUnits.Dim2=us\" + n) fileLog.write(\"DM_LogicalChan=2\" + n) fileLog.write(\"DM_ChanType=SEQUENTIAL\" + n) fileLog.write(\"DM_ChanName=1\" + n) fileLog.write(\"DM_NumDims=2\"", "from previous loop y = content[x].split(\"\\t\") time.append(y[0]) torque.append(y[1]) if show: plt.plot(time,torque) plt.xlabel(\"Time (microseconds)\")", "removePoint(self): self.pointList.pop().pack_forget() # Used for debugging only def printEntry(self): for x in self.pointList:", "self.pointList.pop().pack_forget() # Used for debugging only def printEntry(self): for x in self.pointList: print(\"bit", "Torque\") plt.show() return (time,torque) def saveAs(self): print 'Please Select File:' self.file_path = asksaveasfilename(defaultextension", "window t.wm_title(\"PORT: \" + str(self.portVar.get()) + \" Calibration\") a = Frame(t) a.pack(side =", "= BooleanVar() self.checkVar = IntVar() self.portVar = IntVar() self.portVar.set(port) self.filePathVar = StringVar() ######", "fileLog.write(\"DM_AxisLabel.Dim2=Time\" + n) fileLog.write(\"DM_AxisUnits.Dim1=[]\" + n) fileLog.write(\"DM_AxisUnits.Dim2=us\" + n) fileLog.write(\"DM_LogicalChan=2\" + 
n) fileLog.write(\"DM_ChanType=SEQUENTIAL\"", "fileEntry.pack(side=LEFT) browseButton = Button(self.x, command = self.saveAs, text = \"Browse...\") browseButton.pack(side = LEFT)", "fields = line.split(',') calibratedData = round(self.mSlope.get()*float(fields[1]) + self.yIntercept.get(),1) if isFirst: timeOffset = (-1)*int(fields[0])", "textvariable = self.mSlope).pack(side = TOP) Label(b, text = \"Y Intercept\", padx = 15).pack(side", "calibrationPoint(Frame) : def __init__(self, master): self.bitValue = DoubleVar(0) self.torqueValue = DoubleVar(0) Frame.__init__(self,master) self.pack(side", "self.processes.append(p) p.start() def stopLogging(self): print (\"Stopping Data Collection\") self.startButton.configure(state = NORMAL) self.stopButton.configure(state =", "+ str(self.yIntercept.get()) + n) fileLog.write(\"DM_Operator=\" + n) fileLog.write(\"DM_NumLogChans=2\" + n) fileLog.write(\"DM_NumDataModes=1\" + n)", "n + 'Program Start Time: ' + str(datetime.datetime.now()) + n) fileLog.write(\"Calibration Values: Slope", "xy = b.singlePlot(False) ### Call the singlePlot method for each instance plt.plot(xy[0],xy[1]) ###", "self.bridges: if b.checkVar.get(): xy = b.singlePlot(False) ### Call the singlePlot method for each", "= [] #### Starts Writing to File def startLogging(self): print('Sampling system on Port:", "linReg(self): temp1 = [] temp2 = [] for x in self.pointList: temp1.append(x.bitValue.get()) temp2.append(x.torqueValue.get())", "p.terminate() ### Terminate each process p.join() def plotMultiple(self): for b in self.bridges: if", "+ self.yIntercept.get(),1) if isFirst: timeOffset = (-1)*int(fields[0]) # Take the time on the", "= StringVar() ###### Variables self.ip = ip ###### Interface Initialization Frame.__init__(self.x,master, bd =", "self.filePathVar.set(self.file_path) def addPoint(self, frame): x = calibrationPoint(frame) self.pointList.append(x) def removePoint(self): self.pointList.pop().pack_forget() # Used", "matplotlib.pyplot as plt from 
multiprocessing import Process n = chr(13) + chr(10) +", "+ n) fileLog.write(\"DM_AxisLabel.Dim2=Time\" + n) fileLog.write(\"DM_AxisUnits.Dim1=[]\" + n) fileLog.write(\"DM_AxisUnits.Dim2=us\" + n) fileLog.write(\"DM_LogicalChan=2\" +", "RIGHT) c = Frame(a) c.pack(side = TOP) d = Frame(a) d.pack(side = BOTTOM)", "self.topFrame = Frame(master=self, padx = 8,pady = 8, bd = 2, relief =", "= 3) self.x.pack(side=LEFT) self.createWidgets() ###### linear Calibration coefficents ###### Calibrated = mSlope *", "n) isFirst = True ### Boolean to track start time (time offset) timeOffset", "(\"Stopping Data Collection\") self.startButton.configure(state = NORMAL) self.stopButton.configure(state = DISABLED) for p in self.processes:", "multiple UDP streams and writes them to \".txt\" files. # The output files", "Frame() self.isLogging = BooleanVar() self.checkVar = IntVar() self.portVar = IntVar() self.portVar.set(port) self.filePathVar =", "+ n) fileLog.write(\"DM_DataModeType=TIMHIS\" + n) fileLog.write(\"DM_AxisLabel.Dim1=[]\" + n) fileLog.write(\"DM_AxisLabel.Dim2=Torque\" + n) fileLog.write(\"DM_AxisUnits.Dim1=[]\" +", "f = open(self.filePathVar.get()) content = f.readlines() time = [] torque = [] counter", "a top level window def exitWindow(self, frame): frame.withdraw() def reopenWindow(self, frame): frame.update() frame.reiconify()", "text = \"Start Logging\", command = self.startLogging, width = 18) self.startButton.pack(side = LEFT)", "to File def startLogging(self): print('Sampling system on Port: ' + str(self.portVar.get())) self.isLogging.set(True) ###", "n) fileLog.write(\"DM_DataModeType=TIMHIS\" + n) fileLog.write(\"DM_AxisLabel.Dim1=[]\" + n) fileLog.write(\"DM_AxisLabel.Dim2=Torque\" + n) fileLog.write(\"DM_AxisUnits.Dim1=[]\" + n)", "[] ### List of bridges self.bridges = [] ###### Tkinter variables self.ip =", "= 2, relief = GROOVE) self.topFrame.pack(side = TOP, fill = X) self.startButton =", "= LEFT, padx = 10, pady = 2) return ###### Running code if", 
"Starts Writing to File def startLogging(self): print('Sampling system on Port: ' + str(self.portVar.get()))", "self.startButton = Button(self.topFrame, text = \"Start Logging\", command = self.startLogging, width = 18)", "TOP) # Pack it to the top of the window self.bridges.append(a) # Add", "The output files are formatted to be imported into the InField data analysis", "subsequent packet by the timeOffset fileLog.write(\"{:.6f}\".format(float(adjustedTime)/1000000) + '\\t' + str(calibratedData) + n) prevTime", "= 10, pady = 2) y = Entry(self, textvariable = self.torqueValue, width =", "in self.bridges: if b.checkVar.get(): xy = b.singlePlot(False) ### Call the singlePlot method for", "= [] # Empty out list for x in tempList: # Copy points", "= TOP) Label(b, text = \"Y Intercept\", padx = 15).pack(side = TOP) Entry(b,", "10) self.pack(side = TOP) #self.wm_title(\"Feather Receiver\") self.topFrame = Frame(master=self, padx = 8,pady =", "+ n + 'Program Start Time: ' + str(datetime.datetime.now()) + n) fileLog.write(\"Calibration Values:", "textvariable = self.torqueValue, width = 8) y.pack(side = LEFT, padx = 10, pady", "temp = calibrationPoint(a) temp.bitValue.set(x.bitValue.get()) temp.torqueValue.set(x.torqueValue.get()) self.pointList.append(temp) Button(master = d, command = partial(self.addPoint,a), text", "# If the processor clock has overflowed timeOffset = prevAdjusted # Then edit", "Bridge\", command = self.addBridge, width = 18) self.bridgeButton.pack(side = LEFT) self.bridgeRemove = Button(self.bottomFrame,", "of process objects p.terminate() ### Terminate each process p.join() def plotMultiple(self): for b", "self.bridgeRemove.pack(side = RIGHT) self.addBridge() # Initialize with one bridge Label(self.bottomFrame, text = \"Neapco", "plt.title(\"Time vs. 
Torque\") plt.show() class Bridge(): stringFormat = \"{} \\t {}\" def __init__(self,ip,port,master):", "GROOVE) self.topFrame.pack(side = TOP, fill = X) self.startButton = Button(self.topFrame, text = \"Start", "DoubleVar() self.mSlope.set(1) self.yIntercept = DoubleVar(0) self.pointCount = 0 self.bitPoints = [] self.torquePoints =", "b.checkVar.get(): xy = b.singlePlot(False) ### Call the singlePlot method for each instance plt.plot(xy[0],xy[1])", "n) fileLog.write(\"DM_AxisLabel.Dim2=Time\" + n) fileLog.write(\"DM_AxisUnits.Dim1=[]\" + n) fileLog.write(\"DM_AxisUnits.Dim2=us\" + n) fileLog.write(\"DM_LogicalChan=2\" + n)", "\"Neapco Components LLC: <NAME>\\t2016\", font = (\"Helvetica\",\"12\")).pack(side = BOTTOM) def addBridge(self): a =", "+ n) fileLog.write(\"DM_NumDims=2\" + n) fileLog.write(\"DM_DataMode=1\" + n) fileLog.write(\"DM_DataModeType=TIMHIS\" + n) fileLog.write(\"DM_AxisLabel.Dim1=[]\" +", "bd = 10) self.pack(side = TOP) #self.wm_title(\"Feather Receiver\") self.topFrame = Frame(master=self, padx =", "Button(self.x, command = partial(self.singlePlot,True), text = \"Plot\").pack(side = LEFT) def calibrate(self): #if len(self.pointList)", "= \"Calibrate!\").pack(side = TOP) Label(b, text = \"Slope\", padx = 15).pack(side = TOP)", "pady = 2) y = Entry(self, textvariable = self.torqueValue, width = 8) y.pack(side", "self.processes = [] ### List of bridges self.bridges = [] ###### Tkinter variables", "until told to stop data, addr = sock.recvfrom(1024) # buffer size is 1024", "self.stopButton = Button(self.topFrame, text = \"Stop Logging\", command = self.stopLogging, width = 18,", "= RIGHT) self.addBridge() # Initialize with one bridge Label(self.bottomFrame, text = \"Neapco Components", "box is checked p = Process(target = b.startLogging) self.processes.append(p) p.start() def stopLogging(self): print", "### Read packets until told to stop data, addr = sock.recvfrom(1024) # buffer", "return self.startButton.configure(state = DISABLED) 
self.stopButton.configure(state = NORMAL) for b in self.bridges: ### Loop", "Frame(a) c.pack(side = TOP) d = Frame(a) d.pack(side = BOTTOM) Label(c, text =", "0: t = Toplevel(self.x) # Open window t.wm_title(\"PORT: \" + str(self.portVar.get()) + \"", "debugging only def printEntry(self): for x in self.pointList: print(\"bit value: \"+ str(x.bitValue.get())) print(\"Torque", "int(fields[0])+timeOffset # Shift every subsequent packet by the timeOffset fileLog.write(\"{:.6f}\".format(float(adjustedTime)/1000000) + '\\t' +", "relief = GROOVE) self.topFrame.pack(side = TOP, fill = X) self.startButton = Button(self.topFrame, text", "0) Label(c, text = \"Torque (in-lbs)\", padx = 15).grid(column = 2, row =", "plotMultiple(self): for b in self.bridges: if b.checkVar.get(): xy = b.singlePlot(False) ### Call the", "bd = 2, padx = 3, pady = 3) self.x.pack(side=LEFT) self.createWidgets() ###### linear", "text = \"Remove Bridge\", command = self.removeBridge, width = 18) self.bridgeRemove.pack(side = RIGHT)", "tempList = self.pointList # Store points in temporary list self.pointList = [] #", "= TOP) Entry(b, textvariable = self.mSlope).pack(side = TOP) Label(b, text = \"Y Intercept\",", "sys import os from Tkinter import * import tkMessageBox from tkFileDialog import asksaveasfilename", "def removePoint(self): self.pointList.pop().pack_forget() # Used for debugging only def printEntry(self): for x in", "import asksaveasfilename import datetime import thread from functools import partial import matplotlib.pyplot as", "+ \" Calibration\") a = Frame(t) a.pack(side = LEFT) b = Frame(t) b.pack(side", "RIGHT) self.addBridge() # Initialize with one bridge Label(self.bottomFrame, text = \"Neapco Components LLC:", "= RIGHT) Button(self.topFrame, text = \"Multi-Plot\", command = self.plotMultiple).pack(side = LEFT) self.bottomFrame =", "p = Process(target = b.startLogging) self.processes.append(p) p.start() def stopLogging(self): print (\"Stopping Data Collection\")", 
"Label(self.bottomFrame, text = \"Neapco Components LLC: <NAME>\\t2016\", font = (\"Helvetica\",\"12\")).pack(side = BOTTOM) def", "slope and y intercept from calibration point cloud def linReg(self): temp1 = []", "in self.pointList: temp1.append(x.bitValue.get()) temp2.append(x.torqueValue.get()) A = array([temp1,ones(len(temp1))]) B = array([temp2]) w = linalg.lstsq(A.T,B.T)[0]", "store it in to timeOffset isFirst = False if(int(fields[0]) < prevTime): # If", "self.checkVar = IntVar() self.portVar = IntVar() self.portVar.set(port) self.filePathVar = StringVar() ###### Variables self.ip", "Network Connection sock = socket.socket(socket.AF_INET, # Internet socket.SOCK_DGRAM) # UDP sock.bind((self.ip, (self.portVar.get()))) ###", "DISABLED) for p in self.processes: ### Iterate through list of process objects p.terminate()", "linalg # Import numpy import sys import os from Tkinter import * import", "(time offset) timeOffset = 0 prevTime = 0 prevAdjusted = 0 while True:", "list of process objects p.terminate() ### Terminate each process p.join() def plotMultiple(self): for", "+ yIntercept self.mSlope = DoubleVar() self.mSlope.set(1) self.yIntercept = DoubleVar(0) self.pointCount = 0 self.bitPoints", "asksaveasfilename(defaultextension = '.txt', filetypes = [('Text Files','.txt')]) self.filePathVar.set(self.file_path) def addPoint(self, frame): x =", "= \"Torque (in-lbs)\", padx = 15).grid(column = 2, row = 0) if len(self.pointList)", "= 2, relief = GROOVE) self.bottomFrame.pack(side = BOTTOM, fill = X) self.bridgeButton =", "Frame(a) d.pack(side = BOTTOM) Label(c, text = \"Bit Value\", padx = 15).grid(column =", "= Frame(t) b.pack(side = RIGHT) c = Frame(a) c.pack(side = TOP) d =", "if b.checkVar.get(): xy = b.singlePlot(False) ### Call the singlePlot method for each instance", "fileLog.write(\"DM_NumDataModes=1\" + n) fileLog.write(\"DM_LogicalChan=1\" + n) fileLog.write(\"DM_ChanType=SEQUENTIAL\" + n) fileLog.write(\"DM_ChanName=1\" + n) 
fileLog.write(\"DM_NumDims=2\"", "the data starts on counter = counter + 1 if line.find(\"DM_Start=\") != -1:", "object to self.bridges def removeBridge(self): self.bridges.pop().x.pack_forget() ### Simultaneously starts logging for all selected", "= open(self.filePathVar.get(), \"wb\") ### Necessary formatting for InField compatibility fileLog.write(\"DM_TestTitle=\" + n) fileLog.write(str(self.file_path)", "plt.plot(xy[0],xy[1]) ### Show the plot plt.xlabel(\"Time (microseconds)\") plt.ylabel(\"Torque (inch-pounds)\") plt.title(\"Time vs. Torque\") plt.show()", "calibration points is empty for i in xrange(3): self.addPoint(a) else: tempList = self.pointList", "8,pady = 8, bd = 2, relief = GROOVE) self.topFrame.pack(side = TOP, fill", "File def startLogging(self): print('Sampling system on Port: ' + str(self.portVar.get())) self.isLogging.set(True) ### Network", "self.filePathVar, text = self.filePathVar.get()) fileEntry.pack(side=LEFT) browseButton = Button(self.x, command = self.saveAs, text =", "which line the data starts on counter = counter + 1 if line.find(\"DM_Start=\")", "from tkFileDialog import asksaveasfilename import datetime import thread from functools import partial import", "+ n) fileLog.write(str(self.file_path) + n + 'Program Start Time: ' + str(datetime.datetime.now()) +", "self.pack(side = TOP) self.createWidgets() def createWidgets(self) : x = Entry(self, textvariable = self.bitValue,", "(\"Helvetica\",\"12\")).pack(side = BOTTOM) def addBridge(self): a = Bridge(self.ip.get(),0, master=self) # Create new bridge", "self.plotMultiple).pack(side = LEFT) self.bottomFrame = Frame(master=self,padx = 8, pady = 8, bd =", "Calibrated = mSlope * Uncalibrated + yIntercept self.mSlope = DoubleVar() self.mSlope.set(1) self.yIntercept =", "= self.checkVar) check.pack(side=LEFT) L1 = Label(self.x, text = \" PORT\") L1.pack(side=LEFT) portEntry =", "LEFT, padx = 10, pady = 2) return ###### Running code if __name__", "\".txt\" files. 
# The output files are formatted to be imported into the", "import datetime import thread from functools import partial import matplotlib.pyplot as plt from", "# Exits a top level window def exitWindow(self, frame): frame.withdraw() def reopenWindow(self, frame):", "partial(self.singlePlot,True), text = \"Plot\").pack(side = LEFT) def calibrate(self): #if len(self.pointList) is not 0:", "line in lineSplit: fields = line.split(',') calibratedData = round(self.mSlope.get()*float(fields[1]) + self.yIntercept.get(),1) if isFirst:", "c = Frame(a) c.pack(side = TOP) d = Frame(a) d.pack(side = BOTTOM) Label(c,", "and y intercept from calibration point cloud def linReg(self): temp1 = [] temp2", "points over temp = calibrationPoint(a) temp.bitValue.set(x.bitValue.get()) temp.torqueValue.set(x.torqueValue.get()) self.pointList.append(temp) Button(master = d, command =", "BOTTOM) Label(c, text = \"Bit Value\", padx = 15).grid(column = 0,row = 0)", "self.bridges.append(a) # Add the object to self.bridges def removeBridge(self): self.bridges.pop().x.pack_forget() ### Simultaneously starts", "Process(target = b.startLogging) self.processes.append(p) p.start() def stopLogging(self): print (\"Stopping Data Collection\") self.startButton.configure(state =", "(-1)*int(fields[0]) # Take the time on the very first packet and store it", "= 5, textvariable = self.portVar) portEntry.pack(side=LEFT) L1 = Label(self.x, text = \" File\")", "\"Remove Point\").pack(side = LEFT, fill = X, padx = 10) Button(master = b,", "### List of bridges self.bridges = [] ###### Tkinter variables self.ip = StringVar()", "all selected bridges def startLogging(self): if not tkMessageBox.askyesno(\"Start Logging\",\"Are you sure?\\nFiles may be", "Terminate each process p.join() def plotMultiple(self): for b in self.bridges: if b.checkVar.get(): xy", "not tkMessageBox.askyesno(\"Start Logging\",\"Are you sure?\\nFiles may be overwritten\"): return self.startButton.configure(state = DISABLED) 
self.stopButton.configure(state", "text = \"Plot\").pack(side = LEFT) def calibrate(self): #if len(self.pointList) is not 0: t", "[] for x in self.pointList: temp1.append(x.bitValue.get()) temp2.append(x.torqueValue.get()) A = array([temp1,ones(len(temp1))]) B = array([temp2])", "[] self.pointList = [] #### Starts Writing to File def startLogging(self): print('Sampling system", "asksaveasfilename import datetime import thread from functools import partial import matplotlib.pyplot as plt", "Read packets until told to stop data, addr = sock.recvfrom(1024) # buffer size", "fileLog.write(\"DM_ChanName=1\" + n) fileLog.write(\"DM_NumDims=2\" + n) fileLog.write(\"DM_DataMode=1\" + n) fileLog.write(\"DM_DataModeType=TIMHIS\" + n) fileLog.write(\"DM_AxisLabel.Dim1=[]\"", "temp1 = [] temp2 = [] for x in self.pointList: temp1.append(x.bitValue.get()) temp2.append(x.torqueValue.get()) A", "import socket import serial # import Serial Library from numpy import array, ones,", "\"Slope\", padx = 15).pack(side = TOP) Entry(b, textvariable = self.mSlope).pack(side = TOP) Label(b,", "= DISABLED) self.stopButton.pack(side = RIGHT) Button(self.topFrame, text = \"Multi-Plot\", command = self.plotMultiple).pack(side =", "n = chr(13) + chr(10) + \"\" class SensorNetwork(Frame): def __init__(self,master=None): ### List", "= self.removeBridge, width = 18) self.bridgeRemove.pack(side = RIGHT) self.addBridge() # Initialize with one", "= BOTTOM) def singlePlot(self, show): f = open(self.filePathVar.get()) content = f.readlines() time =", "= LEFT) self.bridgeRemove = Button(self.bottomFrame, text = \"Remove Bridge\", command = self.removeBridge, width", "socket.socket(socket.AF_INET, # Internet socket.SOCK_DGRAM) # UDP sock.bind((self.ip, (self.portVar.get()))) ### File Setup fileLog =", "A = array([temp1,ones(len(temp1))]) B = array([temp2]) w = linalg.lstsq(A.T,B.T)[0] m = round(float(w[0]),3) y", "#if len(self.pointList) is not 0: t = Toplevel(self.x) # Open window t.wm_title(\"PORT: \"", 
"b.checkVar.get(): ### if box is checked p = Process(target = b.startLogging) self.processes.append(p) p.start()", "= self.torqueValue, width = 8) y.pack(side = LEFT, padx = 10, pady =", "bridge Label(self.bottomFrame, text = \"Neapco Components LLC: <NAME>\\t2016\", font = (\"Helvetica\",\"12\")).pack(side = BOTTOM)", "LEFT) Button(self.x, command = partial(self.singlePlot,True), text = \"Plot\").pack(side = LEFT) def calibrate(self): #if", "L1 = Label(self.x, text = \" File\") L1.pack(side=LEFT) fileEntry = Entry(self.x,width = 35,textvariable", "isFirst: timeOffset = (-1)*int(fields[0]) # Take the time on the very first packet", "to track start time (time offset) timeOffset = 0 prevTime = 0 prevAdjusted", "n) fileLog.write(\"Calibration Values: Slope = \" + str(self.mSlope.get()) + \", Y-Intercept = \"", "= self.pointList # Store points in temporary list self.pointList = [] # Empty", "to self.bridges def removeBridge(self): self.bridges.pop().x.pack_forget() ### Simultaneously starts logging for all selected bridges", "of sensors if b.checkVar.get(): ### if box is checked p = Process(target =", "X) self.startButton = Button(self.topFrame, text = \"Start Logging\", command = self.startLogging, width =", "portEntry.pack(side=LEFT) L1 = Label(self.x, text = \" File\") L1.pack(side=LEFT) fileEntry = Entry(self.x,width =", "### File Setup fileLog = open(self.filePathVar.get(), \"wb\") ### Necessary formatting for InField compatibility", "for InField compatibility fileLog.write(\"DM_TestTitle=\" + n) fileLog.write(str(self.file_path) + n + 'Program Start Time:", "n) fileLog.write(\"DM_DataMode=1\" + n) fileLog.write(\"DM_DataModeType=TIMHIS\" + n) fileLog.write(\"DM_AxisLabel.Dim1=[]\" + n) fileLog.write(\"DM_AxisLabel.Dim2=Torque\" + n)", "= 18) self.bridgeRemove.pack(side = RIGHT) self.addBridge() # Initialize with one bridge Label(self.bottomFrame, text", "= 8) y.pack(side = LEFT, padx = 10, pady = 2) return ######", "Frame(master=self,padx = 8, pady = 8, bd = 2, 
relief = GROOVE) self.bottomFrame.pack(side", "calibrationPoint(frame) self.pointList.append(x) def removePoint(self): self.pointList.pop().pack_forget() # Used for debugging only def printEntry(self): for", "= line.split(',') calibratedData = round(self.mSlope.get()*float(fields[1]) + self.yIntercept.get(),1) if isFirst: timeOffset = (-1)*int(fields[0]) #", "Frame.__init__(self,master, bd = 10) self.pack(side = TOP) #self.wm_title(\"Feather Receiver\") self.topFrame = Frame(master=self, padx", "for line in content: ### Find which line the data starts on counter", "+ str(self.portVar.get())) self.isLogging.set(True) ### Network Connection sock = socket.socket(socket.AF_INET, # Internet socket.SOCK_DGRAM) #", "str(self.yIntercept.get()) + n) fileLog.write(\"DM_Operator=\" + n) fileLog.write(\"DM_NumLogChans=2\" + n) fileLog.write(\"DM_NumDataModes=1\" + n) fileLog.write(\"DM_LogicalChan=1\"", "width = 18, state = DISABLED) self.stopButton.pack(side = RIGHT) Button(self.topFrame, text = \"Multi-Plot\",", "top of the window self.bridges.append(a) # Add the object to self.bridges def removeBridge(self):", "serial # import Serial Library from numpy import array, ones, linalg # Import", "= self.addBridge, width = 18) self.bridgeButton.pack(side = LEFT) self.bridgeRemove = Button(self.bottomFrame, text =", "self.filePathVar = StringVar() ###### Variables self.ip = ip ###### Interface Initialization Frame.__init__(self.x,master, bd", "# import Serial Library from numpy import array, ones, linalg # Import numpy", "= b.singlePlot(False) ### Call the singlePlot method for each instance plt.plot(xy[0],xy[1]) ### Show", "in tempList: # Copy points over temp = calibrationPoint(a) temp.bitValue.set(x.bitValue.get()) temp.torqueValue.set(x.torqueValue.get()) self.pointList.append(temp) Button(master", "import * import tkMessageBox from tkFileDialog import asksaveasfilename import datetime import thread from", "= Button(self.x, command = self.saveAs, text = \"Browse...\") 
browseButton.pack(side = LEFT) calibrateButton =", "10) Button(master = d, command = partial(self.removePoint), text = \"Remove Point\").pack(side = LEFT,", "data from multiple sensors and saves it on the local SD card. #", "stopLogging(self): print (\"Stopping Data Collection\") self.startButton.configure(state = NORMAL) self.stopButton.configure(state = DISABLED) for p", "### Starting on the line number found from previous loop y = content[x].split(\"\\t\")", "plt.xlabel(\"Time (microseconds)\") plt.ylabel(\"Torque (inch-pounds)\") plt.title(\"Time vs. Torque\") plt.show() class Bridge(): stringFormat = \"{}", "str(self.mSlope.get()) + \", Y-Intercept = \" + str(self.yIntercept.get()) + n) fileLog.write(\"DM_Operator=\" + n)", "!= -1: break for x in xrange(counter,len(content)-1): ### Starting on the line number", "= \" + str(self.yIntercept.get()) + n) fileLog.write(\"DM_Operator=\" + n) fileLog.write(\"DM_NumLogChans=2\" + n) fileLog.write(\"DM_NumDataModes=1\"", "Button(master = b, command = partial(self.linReg), text = \"Calibrate!\").pack(side = TOP) Label(b, text", "print(\"Torque Value value: \"+ str(x.torqueValue.get())) # Finds slope and y intercept from calibration", "self.bridgeButton.pack(side = LEFT) self.bridgeRemove = Button(self.bottomFrame, text = \"Remove Bridge\", command = self.removeBridge,", "n) fileLog.write(\"DM_AxisLabel.Dim1=[]\" + n) fileLog.write(\"DM_AxisLabel.Dim2=Torque\" + n) fileLog.write(\"DM_AxisUnits.Dim1=[]\" + n) fileLog.write(\"DM_AxisUnits.Dim2=in-lb\" + n)", "= 10) self.pack(side = TOP) #self.wm_title(\"Feather Receiver\") self.topFrame = Frame(master=self, padx = 8,pady", "overflowed timeOffset = prevAdjusted # Then edit the time offset value adjustedTime =", "fileLog.write(\"DM_DataMode=1\" + n) fileLog.write(\"DM_DataModeType=TIMHIS\" + n) fileLog.write(\"DM_AxisLabel.Dim1=[]\" + n) fileLog.write(\"DM_AxisLabel.Dim2=Torque\" + n) fileLog.write(\"DM_AxisUnits.Dim1=[]\"", "x.pack(side = LEFT, padx = 10, pady = 2) y = 
Entry(self, textvariable", "= 0 for line in content: ### Find which line the data starts", "= packetSplit.split('\\n') for line in lineSplit: fields = line.split(',') calibratedData = round(self.mSlope.get()*float(fields[1]) +", "= mSlope * Uncalibrated + yIntercept self.mSlope = DoubleVar() self.mSlope.set(1) self.yIntercept = DoubleVar(0)", "of the window self.bridges.append(a) # Add the object to self.bridges def removeBridge(self): self.bridges.pop().x.pack_forget()", "self.torqueValue, width = 8) y.pack(side = LEFT, padx = 10, pady = 2)", "parallel computing self.processes = [] ### List of bridges self.bridges = [] ######", "Writing to File def startLogging(self): print('Sampling system on Port: ' + str(self.portVar.get())) self.isLogging.set(True)", "functools import partial import matplotlib.pyplot as plt from multiprocessing import Process n =", "Port: ' + str(self.portVar.get())) self.isLogging.set(True) ### Network Connection sock = socket.socket(socket.AF_INET, # Internet", "in content: ### Find which line the data starts on counter = counter", "= Entry(self.x, width = 5, textvariable = self.portVar) portEntry.pack(side=LEFT) L1 = Label(self.x, text", "Pack it to the top of the window self.bridges.append(a) # Add the object", "padx = 3, pady = 3) self.x.pack(side=LEFT) self.createWidgets() ###### linear Calibration coefficents ######", "self.calibrate, text = \"Calibrate\") calibrateButton.pack(side = LEFT) Button(self.x, command = partial(self.singlePlot,True), text =", "partial(self.removePoint), text = \"Remove Point\").pack(side = LEFT, fill = X, padx = 10)", "= LEFT) calibrateButton = Button(self.x, command = self.calibrate, text = \"Calibrate\") calibrateButton.pack(side =", "n) fileLog.write(\"DM_AxisLabel.Dim2=Torque\" + n) fileLog.write(\"DM_AxisUnits.Dim1=[]\" + n) fileLog.write(\"DM_AxisUnits.Dim2=in-lb\" + n) fileLog.write(\"DM_Start=\" + n)", "= (-1)*int(fields[0]) # Take the time on the very first packet and store", "2, relief = GROOVE) 
self.topFrame.pack(side = TOP, fill = X) self.startButton = Button(self.topFrame,", "LEFT) calibrateButton = Button(self.x, command = self.calibrate, text = \"Calibrate\") calibrateButton.pack(side = LEFT)", "= 10, pady = 2) return ###### Running code if __name__ == '__main__':", "for all selected bridges def startLogging(self): if not tkMessageBox.askyesno(\"Start Logging\",\"Are you sure?\\nFiles may", "b.startLogging) self.processes.append(p) p.start() def stopLogging(self): print (\"Stopping Data Collection\") self.startButton.configure(state = NORMAL) self.stopButton.configure(state", "DoubleVar(0) self.torqueValue = DoubleVar(0) Frame.__init__(self,master) self.pack(side = TOP) self.createWidgets() def createWidgets(self) : x", "padx = 15).pack(side = TOP) Entry(b, textvariable = self.yIntercept).pack(side = TOP) Button(b, command", "self.mSlope.set(m) self.yIntercept.set(y) # Exits a top level window def exitWindow(self, frame): frame.withdraw() def", "class calibrationPoint(Frame) : def __init__(self, master): self.bitValue = DoubleVar(0) self.torqueValue = DoubleVar(0) Frame.__init__(self,master)", "n) prevTime = int(fields[0]) prevAdjusted = adjustedTime def createWidgets(self): check = Checkbutton(self.x,text =", "singlePlot(self, show): f = open(self.filePathVar.get()) content = f.readlines() time = [] torque =", "-1: break for x in xrange(counter,len(content)-1): ### Starting on the line number found", "self.isLogging.set(True) ### Network Connection sock = socket.socket(socket.AF_INET, # Internet socket.SOCK_DGRAM) # UDP sock.bind((self.ip,", "= self.yIntercept).pack(side = TOP) Button(b, command = partial(self.exitWindow,t), text = \"OK\").pack(side = BOTTOM)", "15).grid(column = 2, row = 0) if len(self.pointList) == 0: # If the", "(time,torque) def saveAs(self): print 'Please Select File:' self.file_path = asksaveasfilename(defaultextension = '.txt', filetypes", "d, command = partial(self.addPoint,a), text = \"Add Point\").pack(side = LEFT,fill = X, 
padx", "int(fields[0]) prevAdjusted = adjustedTime def createWidgets(self): check = Checkbutton(self.x,text = \"Include\",variable = self.checkVar)", "Bridge\", command = self.removeBridge, width = 18) self.bridgeRemove.pack(side = RIGHT) self.addBridge() # Initialize", "width = 5, textvariable = self.portVar) portEntry.pack(side=LEFT) L1 = Label(self.x, text = \"", "content = f.readlines() time = [] torque = [] counter = 0 for", "to \".txt\" files. # The output files are formatted to be imported into", "time = [] torque = [] counter = 0 for line in content:", "\"Include\",variable = self.checkVar) check.pack(side=LEFT) L1 = Label(self.x, text = \" PORT\") L1.pack(side=LEFT) portEntry", "Logging\",\"Are you sure?\\nFiles may be overwritten\"): return self.startButton.configure(state = DISABLED) self.stopButton.configure(state = NORMAL)", "TOP) self.createWidgets() def createWidgets(self) : x = Entry(self, textvariable = self.bitValue, width =", "### Find which line the data starts on counter = counter + 1", "it on the local SD card. 
# Usage: The program simultaneously reads multiple", "#self.wm_title(\"Feather Receiver\") self.topFrame = Frame(master=self, padx = 8,pady = 8, bd = 2,", "formatting for InField compatibility fileLog.write(\"DM_TestTitle=\" + n) fileLog.write(str(self.file_path) + n + 'Program Start", "\"Stop Logging\", command = self.stopLogging, width = 18, state = DISABLED) self.stopButton.pack(side =", "Starting on the line number found from previous loop y = content[x].split(\"\\t\") time.append(y[0])", "if(int(fields[0]) < prevTime): # If the processor clock has overflowed timeOffset = prevAdjusted", "timeOffset = 0 prevTime = 0 prevAdjusted = 0 while True: ### Read", "x in self.pointList: temp1.append(x.bitValue.get()) temp2.append(x.torqueValue.get()) A = array([temp1,ones(len(temp1))]) B = array([temp2]) w =", "= \"Remove Point\").pack(side = LEFT, fill = X, padx = 10) Button(master =", "L1 = Label(self.x, text = \" PORT\") L1.pack(side=LEFT) portEntry = Entry(self.x, width =", "text = \"Browse...\") browseButton.pack(side = LEFT) calibrateButton = Button(self.x, command = self.calibrate, text", "window def exitWindow(self, frame): frame.withdraw() def reopenWindow(self, frame): frame.update() frame.reiconify() class calibrationPoint(Frame) :", "width = 8) x.pack(side = LEFT, padx = 10, pady = 2) y", "File:' self.file_path = asksaveasfilename(defaultextension = '.txt', filetypes = [('Text Files','.txt')]) self.filePathVar.set(self.file_path) def addPoint(self,", "= [] ### List of bridges self.bridges = [] ###### Tkinter variables self.ip", "print(\"bit value: \"+ str(x.bitValue.get())) print(\"Torque Value value: \"+ str(x.torqueValue.get())) # Finds slope and", "loop y = content[x].split(\"\\t\") time.append(y[0]) torque.append(y[1]) if show: plt.plot(time,torque) plt.xlabel(\"Time (microseconds)\") plt.ylabel(\"Torque (inch-pounds)\")", "fileLog.write(\"DM_LogicalChan=2\" + n) fileLog.write(\"DM_ChanType=SEQUENTIAL\" + n) fileLog.write(\"DM_ChanName=1\" + n) 
fileLog.write(\"DM_NumDims=2\" + n) fileLog.write(\"DM_DataMode=1\"", "multiple sensors and saves it on the local SD card. # Usage: The", "bd = 2, relief = GROOVE) self.bottomFrame.pack(side = BOTTOM, fill = X) self.bridgeButton", "Setup fileLog = open(self.filePathVar.get(), \"wb\") ### Necessary formatting for InField compatibility fileLog.write(\"DM_TestTitle=\" +", "counter = counter + 1 if line.find(\"DM_Start=\") != -1: break for x in", "linear Calibration coefficents ###### Calibrated = mSlope * Uncalibrated + yIntercept self.mSlope =", "sure?\\nFiles may be overwritten\"): return self.startButton.configure(state = DISABLED) self.stopButton.configure(state = NORMAL) for b", "Label(self.x, text = \" PORT\") L1.pack(side=LEFT) portEntry = Entry(self.x, width = 5, textvariable", "[] counter = 0 for line in content: ### Find which line the", "packets until told to stop data, addr = sock.recvfrom(1024) # buffer size is", "\" + str(self.portVar.get()) + \" Calibration\") a = Frame(t) a.pack(side = LEFT) b", "L1.pack(side=LEFT) fileEntry = Entry(self.x,width = 35,textvariable = self.filePathVar, text = self.filePathVar.get()) fileEntry.pack(side=LEFT) browseButton", "= Label(self.x, text = \" File\") L1.pack(side=LEFT) fileEntry = Entry(self.x,width = 35,textvariable =", "browseButton.pack(side = LEFT) calibrateButton = Button(self.x, command = self.calibrate, text = \"Calibrate\") calibrateButton.pack(side", "### Call the singlePlot method for each instance plt.plot(xy[0],xy[1]) ### Show the plot", "fileLog.write(\"Calibration Values: Slope = \" + str(self.mSlope.get()) + \", Y-Intercept = \" +", "+ \", Y-Intercept = \" + str(self.yIntercept.get()) + n) fileLog.write(\"DM_Operator=\" + n) fileLog.write(\"DM_NumLogChans=2\"", "the top of the window self.bridges.append(a) # Add the object to self.bridges def", "state = DISABLED) self.stopButton.pack(side = RIGHT) Button(self.topFrame, text = \"Multi-Plot\", command = self.plotMultiple).pack(side", "+ 
str(self.portVar.get()) + \" Calibration\") a = Frame(t) a.pack(side = LEFT) b =", "self.checkVar) check.pack(side=LEFT) L1 = Label(self.x, text = \" PORT\") L1.pack(side=LEFT) portEntry = Entry(self.x,", "n) fileLog.write(\"DM_Operator=\" + n) fileLog.write(\"DM_NumLogChans=2\" + n) fileLog.write(\"DM_NumDataModes=1\" + n) fileLog.write(\"DM_LogicalChan=1\" + n)", "= \"Neapco Components LLC: <NAME>\\t2016\", font = (\"Helvetica\",\"12\")).pack(side = BOTTOM) def addBridge(self): a", "createWidgets(self): check = Checkbutton(self.x,text = \"Include\",variable = self.checkVar) check.pack(side=LEFT) L1 = Label(self.x, text", "self.portVar = IntVar() self.portVar.set(port) self.filePathVar = StringVar() ###### Variables self.ip = ip ######", "data.decode('utf-8') lineSplit = packetSplit.split('\\n') for line in lineSplit: fields = line.split(',') calibratedData =", "n) fileLog.write(\"DM_AxisUnits.Dim1=[]\" + n) fileLog.write(\"DM_AxisUnits.Dim2=us\" + n) fileLog.write(\"DM_LogicalChan=2\" + n) fileLog.write(\"DM_ChanType=SEQUENTIAL\" + n)", "* Uncalibrated + yIntercept self.mSlope = DoubleVar() self.mSlope.set(1) self.yIntercept = DoubleVar(0) self.pointCount =", "self.bridgeRemove = Button(self.bottomFrame, text = \"Remove Bridge\", command = self.removeBridge, width = 18)", "textvariable = self.portVar) portEntry.pack(side=LEFT) L1 = Label(self.x, text = \" File\") L1.pack(side=LEFT) fileEntry", "xrange(3): self.addPoint(a) else: tempList = self.pointList # Store points in temporary list self.pointList", "= int(fields[0]) prevAdjusted = adjustedTime def createWidgets(self): check = Checkbutton(self.x,text = \"Include\",variable =", "= LEFT) def calibrate(self): #if len(self.pointList) is not 0: t = Toplevel(self.x) #", "objects for parallel computing self.processes = [] ### List of bridges self.bridges =", "relief = GROOVE) self.bottomFrame.pack(side = BOTTOM, fill = X) self.bridgeButton = Button(self.bottomFrame, text", "= False if(int(fields[0]) < prevTime): # If 
the processor clock has overflowed timeOffset", "the time offset value adjustedTime = int(fields[0])+timeOffset # Shift every subsequent packet by", "mSlope * Uncalibrated + yIntercept self.mSlope = DoubleVar() self.mSlope.set(1) self.yIntercept = DoubleVar(0) self.pointCount", "intercept from calibration point cloud def linReg(self): temp1 = [] temp2 = []", "frame.withdraw() def reopenWindow(self, frame): frame.update() frame.reiconify() class calibrationPoint(Frame) : def __init__(self, master): self.bitValue", "B = array([temp2]) w = linalg.lstsq(A.T,B.T)[0] m = round(float(w[0]),3) y = round(float(w[1]),3) self.mSlope.set(m)", "text = self.filePathVar.get()) fileEntry.pack(side=LEFT) browseButton = Button(self.x, command = self.saveAs, text = \"Browse...\")", "self.stopButton.configure(state = DISABLED) for p in self.processes: ### Iterate through list of process", "### Show the plot plt.xlabel(\"Time (microseconds)\") plt.ylabel(\"Torque (inch-pounds)\") plt.title(\"Time vs. Torque\") plt.show() class", "# Used for debugging only def printEntry(self): for x in self.pointList: print(\"bit value:", "+ n) fileLog.write(\"DM_NumDataModes=1\" + n) fileLog.write(\"DM_LogicalChan=1\" + n) fileLog.write(\"DM_ChanType=SEQUENTIAL\" + n) fileLog.write(\"DM_ChanName=1\" +", "d = Frame(a) d.pack(side = BOTTOM) Label(c, text = \"Bit Value\", padx =", "= Entry(self, textvariable = self.bitValue, width = 8) x.pack(side = LEFT, padx =", "for x in xrange(counter,len(content)-1): ### Starting on the line number found from previous", "TOP, fill = X) self.startButton = Button(self.topFrame, text = \"Start Logging\", command =", "self.torquePoints = [] self.isFirstCalib = True self.bitEntryList = [] self.torqueEntryList = [] self.pointList", "print (\"Stopping Data Collection\") self.startButton.configure(state = NORMAL) self.stopButton.configure(state = DISABLED) for p in", "isFirst = False if(int(fields[0]) < prevTime): # If the processor clock has overflowed", "' + 
str(self.portVar.get())) self.isLogging.set(True) ### Network Connection sock = socket.socket(socket.AF_INET, # Internet socket.SOCK_DGRAM)", "object a.x.pack(side = TOP) # Pack it to the top of the window", "sensors and saves it on the local SD card. # Usage: The program", "Serial Library from numpy import array, ones, linalg # Import numpy import sys", "to timeOffset isFirst = False if(int(fields[0]) < prevTime): # If the processor clock", "Time: ' + str(datetime.datetime.now()) + n) fileLog.write(\"Calibration Values: Slope = \" + str(self.mSlope.get())", "= round(self.mSlope.get()*float(fields[1]) + self.yIntercept.get(),1) if isFirst: timeOffset = (-1)*int(fields[0]) # Take the time", "the object to self.bridges def removeBridge(self): self.bridges.pop().x.pack_forget() ### Simultaneously starts logging for all", "+ n) fileLog.write(\"DM_AxisUnits.Dim1=[]\" + n) fileLog.write(\"DM_AxisUnits.Dim2=us\" + n) fileLog.write(\"DM_LogicalChan=2\" + n) fileLog.write(\"DM_ChanType=SEQUENTIAL\" +", "be overwritten\"): return self.startButton.configure(state = DISABLED) self.stopButton.configure(state = NORMAL) for b in self.bridges:", "8, bd = 2, relief = GROOVE) self.bottomFrame.pack(side = BOTTOM, fill = X)", "= Frame(master=self,padx = 8, pady = 8, bd = 2, relief = GROOVE)", "Toplevel(self.x) # Open window t.wm_title(\"PORT: \" + str(self.portVar.get()) + \" Calibration\") a =", "transmitted data from multiple sensors and saves it on the local SD card.", "fileLog.write(\"DM_TestTitle=\" + n) fileLog.write(str(self.file_path) + n + 'Program Start Time: ' + str(datetime.datetime.now())", "__init__(self,master=None): ### List of process objects for parallel computing self.processes = [] ###", "(inch-pounds)\") plt.title(\"Time vs. 
Torque\") plt.show() class Bridge(): stringFormat = \"{} \\t {}\" def", "def calibrate(self): #if len(self.pointList) is not 0: t = Toplevel(self.x) # Open window", "Uncalibrated + yIntercept self.mSlope = DoubleVar() self.mSlope.set(1) self.yIntercept = DoubleVar(0) self.pointCount = 0", "= int(fields[0])+timeOffset # Shift every subsequent packet by the timeOffset fileLog.write(\"{:.6f}\".format(float(adjustedTime)/1000000) + '\\t'", "a.x.pack(side = TOP) # Pack it to the top of the window self.bridges.append(a)", "Select File:' self.file_path = asksaveasfilename(defaultextension = '.txt', filetypes = [('Text Files','.txt')]) self.filePathVar.set(self.file_path) def", "'Please Select File:' self.file_path = asksaveasfilename(defaultextension = '.txt', filetypes = [('Text Files','.txt')]) self.filePathVar.set(self.file_path)", "bridge object a.x.pack(side = TOP) # Pack it to the top of the", "n) fileLog.write(\"DM_AxisUnits.Dim1=[]\" + n) fileLog.write(\"DM_AxisUnits.Dim2=in-lb\" + n) fileLog.write(\"DM_Start=\" + n) isFirst = True", "plt.show() return (time,torque) def saveAs(self): print 'Please Select File:' self.file_path = asksaveasfilename(defaultextension =", "= DoubleVar() self.mSlope.set(1) self.yIntercept = DoubleVar(0) self.pointCount = 0 self.bitPoints = [] self.torquePoints", "+ n) fileLog.write(\"DM_AxisUnits.Dim2=us\" + n) fileLog.write(\"DM_LogicalChan=2\" + n) fileLog.write(\"DM_ChanType=SEQUENTIAL\" + n) fileLog.write(\"DM_ChanName=1\" +", "= \" PORT\") L1.pack(side=LEFT) portEntry = Entry(self.x, width = 5, textvariable = self.portVar)", "compatibility fileLog.write(\"DM_TestTitle=\" + n) fileLog.write(str(self.file_path) + n + 'Program Start Time: ' +", "Label(b, text = \"Y Intercept\", padx = 15).pack(side = TOP) Entry(b, textvariable =", "\\t {}\" def __init__(self,ip,port,master): ###### Tkinter Varibales self.x = Frame() self.isLogging = BooleanVar()", "= Label(self.x, text = \" PORT\") L1.pack(side=LEFT) portEntry = Entry(self.x, width = 
5,", "text = \" File\") L1.pack(side=LEFT) fileEntry = Entry(self.x,width = 35,textvariable = self.filePathVar, text", "LEFT, padx = 10, pady = 2) y = Entry(self, textvariable = self.torqueValue,", "(in-lbs)\", padx = 15).grid(column = 2, row = 0) if len(self.pointList) == 0:", "Button(b, command = partial(self.exitWindow,t), text = \"OK\").pack(side = BOTTOM) def singlePlot(self, show): f", "is checked p = Process(target = b.startLogging) self.processes.append(p) p.start() def stopLogging(self): print (\"Stopping", "\"OK\").pack(side = BOTTOM) def singlePlot(self, show): f = open(self.filePathVar.get()) content = f.readlines() time", "\"Bit Value\", padx = 15).grid(column = 0,row = 0) Label(c, text = \"Torque", "[('Text Files','.txt')]) self.filePathVar.set(self.file_path) def addPoint(self, frame): x = calibrationPoint(frame) self.pointList.append(x) def removePoint(self): self.pointList.pop().pack_forget()", "\"Calibrate\") calibrateButton.pack(side = LEFT) Button(self.x, command = partial(self.singlePlot,True), text = \"Plot\").pack(side = LEFT)", "selected bridges def startLogging(self): if not tkMessageBox.askyesno(\"Start Logging\",\"Are you sure?\\nFiles may be overwritten\"):", "# Shift every subsequent packet by the timeOffset fileLog.write(\"{:.6f}\".format(float(adjustedTime)/1000000) + '\\t' + str(calibratedData)", "if isFirst: timeOffset = (-1)*int(fields[0]) # Take the time on the very first", "process objects p.terminate() ### Terminate each process p.join() def plotMultiple(self): for b in", "Frame.__init__(self,master) self.pack(side = TOP) self.createWidgets() def createWidgets(self) : x = Entry(self, textvariable =", "+ n) fileLog.write(\"DM_DataModeType=TIMHIS\" + n) fileLog.write(\"DM_AxisLabel.Dim1=[]\" + n) fileLog.write(\"DM_AxisLabel.Dim2=Time\" + n) fileLog.write(\"DM_AxisUnits.Dim1=[]\" +", "program reads wirelessly transmitted data from multiple sensors and saves it on the", "The program simultaneously reads multiple UDP streams and 
writes them to \".txt\" files.", "= linalg.lstsq(A.T,B.T)[0] m = round(float(w[0]),3) y = round(float(w[1]),3) self.mSlope.set(m) self.yIntercept.set(y) # Exits a", "Then edit the time offset value adjustedTime = int(fields[0])+timeOffset # Shift every subsequent", "= [('Text Files','.txt')]) self.filePathVar.set(self.file_path) def addPoint(self, frame): x = calibrationPoint(frame) self.pointList.append(x) def removePoint(self):", "numpy import sys import os from Tkinter import * import tkMessageBox from tkFileDialog", "Logging\", command = self.stopLogging, width = 18, state = DISABLED) self.stopButton.pack(side = RIGHT)", "'\\t' + str(calibratedData) + n) prevTime = int(fields[0]) prevAdjusted = adjustedTime def createWidgets(self):", "= 8, bd = 2, relief = GROOVE) self.topFrame.pack(side = TOP, fill =", "= 18) self.startButton.pack(side = LEFT) self.stopButton = Button(self.topFrame, text = \"Stop Logging\", command", "in self.pointList: print(\"bit value: \"+ str(x.bitValue.get())) print(\"Torque Value value: \"+ str(x.torqueValue.get())) # Finds", "temp2 = [] for x in self.pointList: temp1.append(x.bitValue.get()) temp2.append(x.torqueValue.get()) A = array([temp1,ones(len(temp1))]) B", "def __init__(self, master): self.bitValue = DoubleVar(0) self.torqueValue = DoubleVar(0) Frame.__init__(self,master) self.pack(side = TOP)", "sock.recvfrom(1024) # buffer size is 1024 bytes packetSplit = data.decode('utf-8') lineSplit = packetSplit.split('\\n')", "class SensorNetwork(Frame): def __init__(self,master=None): ### List of process objects for parallel computing self.processes", "round(self.mSlope.get()*float(fields[1]) + self.yIntercept.get(),1) if isFirst: timeOffset = (-1)*int(fields[0]) # Take the time on", "vs. 
Torque\") plt.show() return (time,torque) def saveAs(self): print 'Please Select File:' self.file_path =", "StringVar() self.ip.set(\"0.0.0.0\") self.isLogging = BooleanVar() ###### GUI Initialization Frame.__init__(self,master, bd = 10) self.pack(side", "c.pack(side = TOP) d = Frame(a) d.pack(side = BOTTOM) Label(c, text = \"Bit", "class Bridge(): stringFormat = \"{} \\t {}\" def __init__(self,ip,port,master): ###### Tkinter Varibales self.x", "self.bitEntryList = [] self.torqueEntryList = [] self.pointList = [] #### Starts Writing to", "text = \"Stop Logging\", command = self.stopLogging, width = 18, state = DISABLED)", "= 3, pady = 3) self.x.pack(side=LEFT) self.createWidgets() ###### linear Calibration coefficents ###### Calibrated", "through list of sensors if b.checkVar.get(): ### if box is checked p =", "= counter + 1 if line.find(\"DM_Start=\") != -1: break for x in xrange(counter,len(content)-1):", "= open(self.filePathVar.get()) content = f.readlines() time = [] torque = [] counter =", "= b, command = partial(self.linReg), text = \"Calibrate!\").pack(side = TOP) Label(b, text =", "packet by the timeOffset fileLog.write(\"{:.6f}\".format(float(adjustedTime)/1000000) + '\\t' + str(calibratedData) + n) prevTime =", "self.stopButton.configure(state = NORMAL) for b in self.bridges: ### Loop through list of sensors", "= asksaveasfilename(defaultextension = '.txt', filetypes = [('Text Files','.txt')]) self.filePathVar.set(self.file_path) def addPoint(self, frame): x", "for b in self.bridges: if b.checkVar.get(): xy = b.singlePlot(False) ### Call the singlePlot", "LEFT) def calibrate(self): #if len(self.pointList) is not 0: t = Toplevel(self.x) # Open", "on the line number found from previous loop y = content[x].split(\"\\t\") time.append(y[0]) torque.append(y[1])", "the plot plt.xlabel(\"Time (microseconds)\") plt.ylabel(\"Torque (inch-pounds)\") plt.title(\"Time vs. 
Torque\") plt.show() class Bridge(): stringFormat", "round(float(w[1]),3) self.mSlope.set(m) self.yIntercept.set(y) # Exits a top level window def exitWindow(self, frame): frame.withdraw()", "told to stop data, addr = sock.recvfrom(1024) # buffer size is 1024 bytes", "if __name__ == '__main__': root = Tk() root.wm_title(\"Gage Logger\") app = SensorNetwork(master=root) app.mainloop()", "def startLogging(self): print('Sampling system on Port: ' + str(self.portVar.get())) self.isLogging.set(True) ### Network Connection", "= Entry(self.x,width = 35,textvariable = self.filePathVar, text = self.filePathVar.get()) fileEntry.pack(side=LEFT) browseButton = Button(self.x,", "print 'Please Select File:' self.file_path = asksaveasfilename(defaultextension = '.txt', filetypes = [('Text Files','.txt')])", "n) fileLog.write(\"DM_Start=\" + n) isFirst = True ### Boolean to track start time", "= \"Slope\", padx = 15).pack(side = TOP) Entry(b, textvariable = self.mSlope).pack(side = TOP)", "= RIGHT) c = Frame(a) c.pack(side = TOP) d = Frame(a) d.pack(side =", "i in xrange(3): self.addPoint(a) else: tempList = self.pointList # Store points in temporary", "Used for debugging only def printEntry(self): for x in self.pointList: print(\"bit value: \"+", "= DISABLED) self.stopButton.configure(state = NORMAL) for b in self.bridges: ### Loop through list", "text = \"Torque (in-lbs)\", padx = 15).grid(column = 2, row = 0) if", "Button(master = d, command = partial(self.removePoint), text = \"Remove Point\").pack(side = LEFT, fill", "cloud def linReg(self): temp1 = [] temp2 = [] for x in self.pointList:", "n) fileLog.write(\"DM_AxisLabel.Dim1=[]\" + n) fileLog.write(\"DM_AxisLabel.Dim2=Time\" + n) fileLog.write(\"DM_AxisUnits.Dim1=[]\" + n) fileLog.write(\"DM_AxisUnits.Dim2=us\" + n)", "through list of process objects p.terminate() ### Terminate each process p.join() def plotMultiple(self):", "IntVar() self.portVar.set(port) self.filePathVar = StringVar() ###### Variables self.ip = ip ###### 
Interface Initialization", "= \"{} \\t {}\" def __init__(self,ip,port,master): ###### Tkinter Varibales self.x = Frame() self.isLogging", "self.file_path = asksaveasfilename(defaultextension = '.txt', filetypes = [('Text Files','.txt')]) self.filePathVar.set(self.file_path) def addPoint(self, frame):", "DISABLED) self.stopButton.pack(side = RIGHT) Button(self.topFrame, text = \"Multi-Plot\", command = self.plotMultiple).pack(side = LEFT)", "number found from previous loop y = content[x].split(\"\\t\") time.append(y[0]) torque.append(y[1]) if show: plt.plot(time,torque)", "str(x.torqueValue.get())) # Finds slope and y intercept from calibration point cloud def linReg(self):", "open(self.filePathVar.get()) content = f.readlines() time = [] torque = [] counter = 0", "= partial(self.singlePlot,True), text = \"Plot\").pack(side = LEFT) def calibrate(self): #if len(self.pointList) is not", "# Create new bridge object a.x.pack(side = TOP) # Pack it to the", "to be imported into the InField data analysis software. 
import socket import serial", "tkMessageBox.askyesno(\"Start Logging\",\"Are you sure?\\nFiles may be overwritten\"): return self.startButton.configure(state = DISABLED) self.stopButton.configure(state =", "fileLog.write(\"DM_NumLogChans=2\" + n) fileLog.write(\"DM_NumDataModes=1\" + n) fileLog.write(\"DM_LogicalChan=1\" + n) fileLog.write(\"DM_ChanType=SEQUENTIAL\" + n) fileLog.write(\"DM_ChanName=1\"", "TOP) Entry(b, textvariable = self.mSlope).pack(side = TOP) Label(b, text = \"Y Intercept\", padx", "= Process(target = b.startLogging) self.processes.append(p) p.start() def stopLogging(self): print (\"Stopping Data Collection\") self.startButton.configure(state", "+ n) fileLog.write(\"DM_AxisLabel.Dim1=[]\" + n) fileLog.write(\"DM_AxisLabel.Dim2=Time\" + n) fileLog.write(\"DM_AxisUnits.Dim1=[]\" + n) fileLog.write(\"DM_AxisUnits.Dim2=us\" +", "text = \"Y Intercept\", padx = 15).pack(side = TOP) Entry(b, textvariable = self.yIntercept).pack(side", "= Frame(a) d.pack(side = BOTTOM) Label(c, text = \"Bit Value\", padx = 15).grid(column", "may be overwritten\"): return self.startButton.configure(state = DISABLED) self.stopButton.configure(state = NORMAL) for b in", "line.find(\"DM_Start=\") != -1: break for x in xrange(counter,len(content)-1): ### Starting on the line", "UDP sock.bind((self.ip, (self.portVar.get()))) ### File Setup fileLog = open(self.filePathVar.get(), \"wb\") ### Necessary formatting", "software. 
import socket import serial # import Serial Library from numpy import array,", "prevTime = 0 prevAdjusted = 0 while True: ### Read packets until told", "self.pointList.append(temp) Button(master = d, command = partial(self.addPoint,a), text = \"Add Point\").pack(side = LEFT,fill", "temp2.append(x.torqueValue.get()) A = array([temp1,ones(len(temp1))]) B = array([temp2]) w = linalg.lstsq(A.T,B.T)[0] m = round(float(w[0]),3)", "text = \"Bit Value\", padx = 15).grid(column = 0,row = 0) Label(c, text", "[] temp2 = [] for x in self.pointList: temp1.append(x.bitValue.get()) temp2.append(x.torqueValue.get()) A = array([temp1,ones(len(temp1))])", "frame): frame.update() frame.reiconify() class calibrationPoint(Frame) : def __init__(self, master): self.bitValue = DoubleVar(0) self.torqueValue", "d, command = partial(self.removePoint), text = \"Remove Point\").pack(side = LEFT, fill = X,", "18, state = DISABLED) self.stopButton.pack(side = RIGHT) Button(self.topFrame, text = \"Multi-Plot\", command =", "a = Frame(t) a.pack(side = LEFT) b = Frame(t) b.pack(side = RIGHT) c", "def createWidgets(self) : x = Entry(self, textvariable = self.bitValue, width = 8) x.pack(side", "# Pack it to the top of the window self.bridges.append(a) # Add the", "timeOffset fileLog.write(\"{:.6f}\".format(float(adjustedTime)/1000000) + '\\t' + str(calibratedData) + n) prevTime = int(fields[0]) prevAdjusted =", "(microseconds)\") plt.ylabel(\"Torque (inch-pounds)\") plt.title(\"Time vs. 
Torque\") plt.show() class Bridge(): stringFormat = \"{} \\t", "bd = 2, relief = GROOVE) self.topFrame.pack(side = TOP, fill = X) self.startButton", "t.wm_title(\"PORT: \" + str(self.portVar.get()) + \" Calibration\") a = Frame(t) a.pack(side = LEFT)", "[] #### Starts Writing to File def startLogging(self): print('Sampling system on Port: '", "frame): frame.withdraw() def reopenWindow(self, frame): frame.update() frame.reiconify() class calibrationPoint(Frame) : def __init__(self, master):", "+ n) fileLog.write(\"DM_LogicalChan=2\" + n) fileLog.write(\"DM_ChanType=SEQUENTIAL\" + n) fileLog.write(\"DM_ChanName=1\" + n) fileLog.write(\"DM_NumDims=2\" +", "lineSplit: fields = line.split(',') calibratedData = round(self.mSlope.get()*float(fields[1]) + self.yIntercept.get(),1) if isFirst: timeOffset =", "from functools import partial import matplotlib.pyplot as plt from multiprocessing import Process n", "fileLog.write(\"DM_NumDims=2\" + n) fileLog.write(\"DM_DataMode=1\" + n) fileLog.write(\"DM_DataModeType=TIMHIS\" + n) fileLog.write(\"DM_AxisLabel.Dim1=[]\" + n) fileLog.write(\"DM_AxisLabel.Dim2=Time\"", "if box is checked p = Process(target = b.startLogging) self.processes.append(p) p.start() def stopLogging(self):", "# Description: The program reads wirelessly transmitted data from multiple sensors and saves", "[] self.torquePoints = [] self.isFirstCalib = True self.bitEntryList = [] self.torqueEntryList = []", "= partial(self.linReg), text = \"Calibrate!\").pack(side = TOP) Label(b, text = \"Slope\", padx =", "= Frame(a) c.pack(side = TOP) d = Frame(a) d.pack(side = BOTTOM) Label(c, text", "+ str(self.mSlope.get()) + \", Y-Intercept = \" + str(self.yIntercept.get()) + n) fileLog.write(\"DM_Operator=\" +", "+ n) fileLog.write(\"DM_AxisLabel.Dim2=Torque\" + n) fileLog.write(\"DM_AxisUnits.Dim1=[]\" + n) fileLog.write(\"DM_AxisUnits.Dim2=in-lb\" + n) fileLog.write(\"DM_Start=\" +", "self.bitValue, width = 8) x.pack(side = LEFT, padx = 10, pady = 2)", 
"fileLog.write(\"DM_AxisUnits.Dim2=in-lb\" + n) fileLog.write(\"DM_Start=\" + n) isFirst = True ### Boolean to track", "[] torque = [] counter = 0 for line in content: ### Find", "list self.pointList = [] # Empty out list for x in tempList: #", "BOTTOM) def addBridge(self): a = Bridge(self.ip.get(),0, master=self) # Create new bridge object a.x.pack(side", "each process p.join() def plotMultiple(self): for b in self.bridges: if b.checkVar.get(): xy =", "### Terminate each process p.join() def plotMultiple(self): for b in self.bridges: if b.checkVar.get():", "offset value adjustedTime = int(fields[0])+timeOffset # Shift every subsequent packet by the timeOffset", "print('Sampling system on Port: ' + str(self.portVar.get())) self.isLogging.set(True) ### Network Connection sock =", "DISABLED) self.stopButton.configure(state = NORMAL) for b in self.bridges: ### Loop through list of", "to stop data, addr = sock.recvfrom(1024) # buffer size is 1024 bytes packetSplit", "partial(self.exitWindow,t), text = \"OK\").pack(side = BOTTOM) def singlePlot(self, show): f = open(self.filePathVar.get()) content", "= IntVar() self.portVar.set(port) self.filePathVar = StringVar() ###### Variables self.ip = ip ###### Interface", "'.txt', filetypes = [('Text Files','.txt')]) self.filePathVar.set(self.file_path) def addPoint(self, frame): x = calibrationPoint(frame) self.pointList.append(x)", "in self.processes: ### Iterate through list of process objects p.terminate() ### Terminate each", "Frame(master=self, padx = 8,pady = 8, bd = 2, relief = GROOVE) self.topFrame.pack(side", "a = Bridge(self.ip.get(),0, master=self) # Create new bridge object a.x.pack(side = TOP) #", "self.portVar) portEntry.pack(side=LEFT) L1 = Label(self.x, text = \" File\") L1.pack(side=LEFT) fileEntry = Entry(self.x,width", "= LEFT,fill = X, padx = 10) Button(master = d, command = partial(self.removePoint),", "= ip ###### Interface Initialization Frame.__init__(self.x,master, bd = 2, padx = 3, pady", "x in 
self.pointList: print(\"bit value: \"+ str(x.bitValue.get())) print(\"Torque Value value: \"+ str(x.torqueValue.get())) #", "self.torqueValue = DoubleVar(0) Frame.__init__(self,master) self.pack(side = TOP) self.createWidgets() def createWidgets(self) : x =", "LEFT,fill = X, padx = 10) Button(master = d, command = partial(self.removePoint), text", "from numpy import array, ones, linalg # Import numpy import sys import os", "Find which line the data starts on counter = counter + 1 if", "+ str(calibratedData) + n) prevTime = int(fields[0]) prevAdjusted = adjustedTime def createWidgets(self): check", "Label(c, text = \"Torque (in-lbs)\", padx = 15).grid(column = 2, row = 0)", "has overflowed timeOffset = prevAdjusted # Then edit the time offset value adjustedTime", "bridges self.bridges = [] ###### Tkinter variables self.ip = StringVar() self.ip.set(\"0.0.0.0\") self.isLogging =", "BOTTOM, fill = X) self.bridgeButton = Button(self.bottomFrame, text = \"Add Bridge\", command =", "0 prevAdjusted = 0 while True: ### Read packets until told to stop", "Slope = \" + str(self.mSlope.get()) + \", Y-Intercept = \" + str(self.yIntercept.get()) +", "for p in self.processes: ### Iterate through list of process objects p.terminate() ###", "n) fileLog.write(\"DM_NumDataModes=1\" + n) fileLog.write(\"DM_LogicalChan=1\" + n) fileLog.write(\"DM_ChanType=SEQUENTIAL\" + n) fileLog.write(\"DM_ChanName=1\" + n)", "command = self.plotMultiple).pack(side = LEFT) self.bottomFrame = Frame(master=self,padx = 8, pady = 8,", "= TOP, fill = X) self.startButton = Button(self.topFrame, text = \"Start Logging\", command", "DoubleVar(0) self.pointCount = 0 self.bitPoints = [] self.torquePoints = [] self.isFirstCalib = True", "Bridge(self.ip.get(),0, master=self) # Create new bridge object a.x.pack(side = TOP) # Pack it", "TOP) d = Frame(a) d.pack(side = BOTTOM) Label(c, text = \"Bit Value\", padx", "import os from Tkinter import * import tkMessageBox from tkFileDialog import asksaveasfilename 
import", "fileLog.write(\"DM_NumDims=2\" + n) fileLog.write(\"DM_DataMode=1\" + n) fileLog.write(\"DM_DataModeType=TIMHIS\" + n) fileLog.write(\"DM_AxisLabel.Dim1=[]\" + n) fileLog.write(\"DM_AxisLabel.Dim2=Torque\"", "n) fileLog.write(str(self.file_path) + n + 'Program Start Time: ' + str(datetime.datetime.now()) + n)", "import tkMessageBox from tkFileDialog import asksaveasfilename import datetime import thread from functools import", "+ n) fileLog.write(\"DM_AxisUnits.Dim2=in-lb\" + n) fileLog.write(\"DM_Start=\" + n) isFirst = True ### Boolean", "line number found from previous loop y = content[x].split(\"\\t\") time.append(y[0]) torque.append(y[1]) if show:", "edit the time offset value adjustedTime = int(fields[0])+timeOffset # Shift every subsequent packet", "padx = 15).grid(column = 2, row = 0) if len(self.pointList) == 0: #", "= [] self.torquePoints = [] self.isFirstCalib = True self.bitEntryList = [] self.torqueEntryList =", "program simultaneously reads multiple UDP streams and writes them to \".txt\" files. #", "def __init__(self,ip,port,master): ###### Tkinter Varibales self.x = Frame() self.isLogging = BooleanVar() self.checkVar =", "self.pointList.append(x) def removePoint(self): self.pointList.pop().pack_forget() # Used for debugging only def printEntry(self): for x", "= 10) Button(master = b, command = partial(self.linReg), text = \"Calibrate!\").pack(side = TOP)", "frame): x = calibrationPoint(frame) self.pointList.append(x) def removePoint(self): self.pointList.pop().pack_forget() # Used for debugging only", "import serial # import Serial Library from numpy import array, ones, linalg #", "Store points in temporary list self.pointList = [] # Empty out list for", "in xrange(counter,len(content)-1): ### Starting on the line number found from previous loop y", "of bridges self.bridges = [] ###### Tkinter variables self.ip = StringVar() self.ip.set(\"0.0.0.0\") self.isLogging", "(inch-pounds)\") plt.title(\"Time vs. 
Torque\") plt.show() return (time,torque) def saveAs(self): print 'Please Select File:'", "= b.startLogging) self.processes.append(p) p.start() def stopLogging(self): print (\"Stopping Data Collection\") self.startButton.configure(state = NORMAL)", "reads multiple UDP streams and writes them to \".txt\" files. # The output", "is not 0: t = Toplevel(self.x) # Open window t.wm_title(\"PORT: \" + str(self.portVar.get())", "= '.txt', filetypes = [('Text Files','.txt')]) self.filePathVar.set(self.file_path) def addPoint(self, frame): x = calibrationPoint(frame)", "be imported into the InField data analysis software. import socket import serial #", "prevAdjusted = 0 while True: ### Read packets until told to stop data,", "timeOffset isFirst = False if(int(fields[0]) < prevTime): # If the processor clock has", "import thread from functools import partial import matplotlib.pyplot as plt from multiprocessing import", "= GROOVE) self.topFrame.pack(side = TOP, fill = X) self.startButton = Button(self.topFrame, text =", "= array([temp1,ones(len(temp1))]) B = array([temp2]) w = linalg.lstsq(A.T,B.T)[0] m = round(float(w[0]),3) y =", "you sure?\\nFiles may be overwritten\"): return self.startButton.configure(state = DISABLED) self.stopButton.configure(state = NORMAL) for", "Button(self.topFrame, text = \"Start Logging\", command = self.startLogging, width = 18) self.startButton.pack(side =", "= 15).grid(column = 0,row = 0) Label(c, text = \"Torque (in-lbs)\", padx =", "for parallel computing self.processes = [] ### List of bridges self.bridges = []", "data, addr = sock.recvfrom(1024) # buffer size is 1024 bytes packetSplit = data.decode('utf-8')", "= 35,textvariable = self.filePathVar, text = self.filePathVar.get()) fileEntry.pack(side=LEFT) browseButton = Button(self.x, command =", "for each instance plt.plot(xy[0],xy[1]) ### Show the plot plt.xlabel(\"Time (microseconds)\") plt.ylabel(\"Torque (inch-pounds)\") plt.title(\"Time", "= 0 self.bitPoints = [] self.torquePoints = [] 
self.isFirstCalib = True self.bitEntryList =", "self.isLogging = BooleanVar() ###### GUI Initialization Frame.__init__(self,master, bd = 10) self.pack(side = TOP)", "prevTime): # If the processor clock has overflowed timeOffset = prevAdjusted # Then", "# buffer size is 1024 bytes packetSplit = data.decode('utf-8') lineSplit = packetSplit.split('\\n') for", "= self.filePathVar.get()) fileEntry.pack(side=LEFT) browseButton = Button(self.x, command = self.saveAs, text = \"Browse...\") browseButton.pack(side", "self.processes: ### Iterate through list of process objects p.terminate() ### Terminate each process", "each instance plt.plot(xy[0],xy[1]) ### Show the plot plt.xlabel(\"Time (microseconds)\") plt.ylabel(\"Torque (inch-pounds)\") plt.title(\"Time vs.", "### Iterate through list of process objects p.terminate() ### Terminate each process p.join()", "'Program Start Time: ' + str(datetime.datetime.now()) + n) fileLog.write(\"Calibration Values: Slope = \"", "sock.bind((self.ip, (self.portVar.get()))) ### File Setup fileLog = open(self.filePathVar.get(), \"wb\") ### Necessary formatting for", "the time on the very first packet and store it in to timeOffset", "# Usage: The program simultaneously reads multiple UDP streams and writes them to", "value: \"+ str(x.bitValue.get())) print(\"Torque Value value: \"+ str(x.torqueValue.get())) # Finds slope and y", "+ n) fileLog.write(\"DM_Start=\" + n) isFirst = True ### Boolean to track start", "first packet and store it in to timeOffset isFirst = False if(int(fields[0]) <", "Process n = chr(13) + chr(10) + \"\" class SensorNetwork(Frame): def __init__(self,master=None): ###", "addPoint(self, frame): x = calibrationPoint(frame) self.pointList.append(x) def removePoint(self): self.pointList.pop().pack_forget() # Used for debugging", "line.split(',') calibratedData = round(self.mSlope.get()*float(fields[1]) + self.yIntercept.get(),1) if isFirst: timeOffset = (-1)*int(fields[0]) # Take", "bytes packetSplit = 
data.decode('utf-8') lineSplit = packetSplit.split('\\n') for line in lineSplit: fields =", "d.pack(side = BOTTOM) Label(c, text = \"Bit Value\", padx = 15).grid(column = 0,row", "p.start() def stopLogging(self): print (\"Stopping Data Collection\") self.startButton.configure(state = NORMAL) self.stopButton.configure(state = DISABLED)", "value adjustedTime = int(fields[0])+timeOffset # Shift every subsequent packet by the timeOffset fileLog.write(\"{:.6f}\".format(float(adjustedTime)/1000000)", "them to \".txt\" files. # The output files are formatted to be imported", "= self.stopLogging, width = 18, state = DISABLED) self.stopButton.pack(side = RIGHT) Button(self.topFrame, text", "self.portVar.set(port) self.filePathVar = StringVar() ###### Variables self.ip = ip ###### Interface Initialization Frame.__init__(self.x,master,", "stop data, addr = sock.recvfrom(1024) # buffer size is 1024 bytes packetSplit =", "self.x = Frame() self.isLogging = BooleanVar() self.checkVar = IntVar() self.portVar = IntVar() self.portVar.set(port)", "= BooleanVar() ###### GUI Initialization Frame.__init__(self,master, bd = 10) self.pack(side = TOP) #self.wm_title(\"Feather", "partial import matplotlib.pyplot as plt from multiprocessing import Process n = chr(13) +", "\"Browse...\") browseButton.pack(side = LEFT) calibrateButton = Button(self.x, command = self.calibrate, text = \"Calibrate\")", "is 1024 bytes packetSplit = data.decode('utf-8') lineSplit = packetSplit.split('\\n') for line in lineSplit:", "True ### Boolean to track start time (time offset) timeOffset = 0 prevTime", "show): f = open(self.filePathVar.get()) content = f.readlines() time = [] torque = []", "fileLog.write(\"DM_AxisLabel.Dim1=[]\" + n) fileLog.write(\"DM_AxisLabel.Dim2=Torque\" + n) fileLog.write(\"DM_AxisUnits.Dim1=[]\" + n) fileLog.write(\"DM_AxisUnits.Dim2=in-lb\" + n) fileLog.write(\"DM_Start=\"", "text = \"Slope\", padx = 15).pack(side = TOP) Entry(b, textvariable = self.mSlope).pack(side =", 
"plt.title(\"Time vs. Torque\") plt.show() return (time,torque) def saveAs(self): print 'Please Select File:' self.file_path", "padx = 10, pady = 2) y = Entry(self, textvariable = self.torqueValue, width", "True: ### Read packets until told to stop data, addr = sock.recvfrom(1024) #", ": def __init__(self, master): self.bitValue = DoubleVar(0) self.torqueValue = DoubleVar(0) Frame.__init__(self,master) self.pack(side =", "= 15).pack(side = TOP) Entry(b, textvariable = self.mSlope).pack(side = TOP) Label(b, text =", "= LEFT) Button(self.x, command = partial(self.singlePlot,True), text = \"Plot\").pack(side = LEFT) def calibrate(self):", "coefficents ###### Calibrated = mSlope * Uncalibrated + yIntercept self.mSlope = DoubleVar() self.mSlope.set(1)", "self.startButton.configure(state = DISABLED) self.stopButton.configure(state = NORMAL) for b in self.bridges: ### Loop through", "= BOTTOM, fill = X) self.bridgeButton = Button(self.bottomFrame, text = \"Add Bridge\", command", "= calibrationPoint(a) temp.bitValue.set(x.bitValue.get()) temp.torqueValue.set(x.torqueValue.get()) self.pointList.append(temp) Button(master = d, command = partial(self.addPoint,a), text =", "Button(self.x, command = self.saveAs, text = \"Browse...\") browseButton.pack(side = LEFT) calibrateButton = Button(self.x,", "pady = 3) self.x.pack(side=LEFT) self.createWidgets() ###### linear Calibration coefficents ###### Calibrated = mSlope", "on the local SD card. 
# Usage: The program simultaneously reads multiple UDP", "= 2, row = 0) if len(self.pointList) == 0: # If the list", "###### Tkinter variables self.ip = StringVar() self.ip.set(\"0.0.0.0\") self.isLogging = BooleanVar() ###### GUI Initialization", "padx = 15).pack(side = TOP) Entry(b, textvariable = self.mSlope).pack(side = TOP) Label(b, text", "over temp = calibrationPoint(a) temp.bitValue.set(x.bitValue.get()) temp.torqueValue.set(x.torqueValue.get()) self.pointList.append(temp) Button(master = d, command = partial(self.addPoint,a),", "# Initialize with one bridge Label(self.bottomFrame, text = \"Neapco Components LLC: <NAME>\\t2016\", font", "ip ###### Interface Initialization Frame.__init__(self.x,master, bd = 2, padx = 3, pady =", "t = Toplevel(self.x) # Open window t.wm_title(\"PORT: \" + str(self.portVar.get()) + \" Calibration\")", "fileLog.write(\"DM_Start=\" + n) isFirst = True ### Boolean to track start time (time", "offset) timeOffset = 0 prevTime = 0 prevAdjusted = 0 while True: ###", "TOP) Label(b, text = \"Y Intercept\", padx = 15).pack(side = TOP) Entry(b, textvariable", "self.startLogging, width = 18) self.startButton.pack(side = LEFT) self.stopButton = Button(self.topFrame, text = \"Stop", "method for each instance plt.plot(xy[0],xy[1]) ### Show the plot plt.xlabel(\"Time (microseconds)\") plt.ylabel(\"Torque (inch-pounds)\")", "\" Calibration\") a = Frame(t) a.pack(side = LEFT) b = Frame(t) b.pack(side =", "exitWindow(self, frame): frame.withdraw() def reopenWindow(self, frame): frame.update() frame.reiconify() class calibrationPoint(Frame) : def __init__(self,", "35,textvariable = self.filePathVar, text = self.filePathVar.get()) fileEntry.pack(side=LEFT) browseButton = Button(self.x, command = self.saveAs,", "= TOP) Button(b, command = partial(self.exitWindow,t), text = \"OK\").pack(side = BOTTOM) def singlePlot(self,", "Values: Slope = \" + str(self.mSlope.get()) + \", Y-Intercept = \" + str(self.yIntercept.get())", "datetime import thread from 
functools import partial import matplotlib.pyplot as plt from multiprocessing", "x = calibrationPoint(frame) self.pointList.append(x) def removePoint(self): self.pointList.pop().pack_forget() # Used for debugging only def", "self.x.pack(side=LEFT) self.createWidgets() ###### linear Calibration coefficents ###### Calibrated = mSlope * Uncalibrated +", "Tkinter Varibales self.x = Frame() self.isLogging = BooleanVar() self.checkVar = IntVar() self.portVar =", "x in xrange(counter,len(content)-1): ### Starting on the line number found from previous loop", "width = 8) y.pack(side = LEFT, padx = 10, pady = 2) return", "The program reads wirelessly transmitted data from multiple sensors and saves it on", "p in self.processes: ### Iterate through list of process objects p.terminate() ### Terminate", "browseButton = Button(self.x, command = self.saveAs, text = \"Browse...\") browseButton.pack(side = LEFT) calibrateButton", "Frame(t) a.pack(side = LEFT) b = Frame(t) b.pack(side = RIGHT) c = Frame(a)", "multiprocessing import Process n = chr(13) + chr(10) + \"\" class SensorNetwork(Frame): def", "textvariable = self.bitValue, width = 8) x.pack(side = LEFT, padx = 10, pady", "on counter = counter + 1 if line.find(\"DM_Start=\") != -1: break for x", "yIntercept self.mSlope = DoubleVar() self.mSlope.set(1) self.yIntercept = DoubleVar(0) self.pointCount = 0 self.bitPoints =", "fileLog.write(\"DM_DataModeType=TIMHIS\" + n) fileLog.write(\"DM_AxisLabel.Dim1=[]\" + n) fileLog.write(\"DM_AxisLabel.Dim2=Torque\" + n) fileLog.write(\"DM_AxisUnits.Dim1=[]\" + n) fileLog.write(\"DM_AxisUnits.Dim2=in-lb\"", "= \"OK\").pack(side = BOTTOM) def singlePlot(self, show): f = open(self.filePathVar.get()) content = f.readlines()", "the timeOffset fileLog.write(\"{:.6f}\".format(float(adjustedTime)/1000000) + '\\t' + str(calibratedData) + n) prevTime = int(fields[0]) prevAdjusted", "self.ip = ip ###### Interface Initialization Frame.__init__(self.x,master, bd = 2, padx = 3,", "pady = 2) return 
###### Running code if __name__ == '__main__': root =", "Files','.txt')]) self.filePathVar.set(self.file_path) def addPoint(self, frame): x = calibrationPoint(frame) self.pointList.append(x) def removePoint(self): self.pointList.pop().pack_forget() #", "it to the top of the window self.bridges.append(a) # Add the object to", "from multiprocessing import Process n = chr(13) + chr(10) + \"\" class SensorNetwork(Frame):", "Label(self.x, text = \" File\") L1.pack(side=LEFT) fileEntry = Entry(self.x,width = 35,textvariable = self.filePathVar,", "= [] self.pointList = [] #### Starts Writing to File def startLogging(self): print('Sampling", "\"Calibrate!\").pack(side = TOP) Label(b, text = \"Slope\", padx = 15).pack(side = TOP) Entry(b,", "###### Tkinter Varibales self.x = Frame() self.isLogging = BooleanVar() self.checkVar = IntVar() self.portVar", "# Internet socket.SOCK_DGRAM) # UDP sock.bind((self.ip, (self.portVar.get()))) ### File Setup fileLog = open(self.filePathVar.get(),", "text = \"Multi-Plot\", command = self.plotMultiple).pack(side = LEFT) self.bottomFrame = Frame(master=self,padx = 8,", "= \"Start Logging\", command = self.startLogging, width = 18) self.startButton.pack(side = LEFT) self.stopButton", "Call the singlePlot method for each instance plt.plot(xy[0],xy[1]) ### Show the plot plt.xlabel(\"Time", "partial(self.addPoint,a), text = \"Add Point\").pack(side = LEFT,fill = X, padx = 10) Button(master", "self.bridges: ### Loop through list of sensors if b.checkVar.get(): ### if box is", "for x in tempList: # Copy points over temp = calibrationPoint(a) temp.bitValue.set(x.bitValue.get()) temp.torqueValue.set(x.torqueValue.get())", "empty for i in xrange(3): self.addPoint(a) else: tempList = self.pointList # Store points", "= array([temp2]) w = linalg.lstsq(A.T,B.T)[0] m = round(float(w[0]),3) y = round(float(w[1]),3) self.mSlope.set(m) self.yIntercept.set(y)", "Finds slope and y intercept from calibration point cloud def linReg(self): temp1 =", "# The 
output files are formatted to be imported into the InField data", "Created By: <NAME> # Date: 7/16/2016 # Description: The program reads wirelessly transmitted", "self.ip.set(\"0.0.0.0\") self.isLogging = BooleanVar() ###### GUI Initialization Frame.__init__(self,master, bd = 10) self.pack(side =", "calibrate(self): #if len(self.pointList) is not 0: t = Toplevel(self.x) # Open window t.wm_title(\"PORT:", "timeOffset = prevAdjusted # Then edit the time offset value adjustedTime = int(fields[0])+timeOffset", "content[x].split(\"\\t\") time.append(y[0]) torque.append(y[1]) if show: plt.plot(time,torque) plt.xlabel(\"Time (microseconds)\") plt.ylabel(\"Torque (inch-pounds)\") plt.title(\"Time vs. Torque\")", "\", Y-Intercept = \" + str(self.yIntercept.get()) + n) fileLog.write(\"DM_Operator=\" + n) fileLog.write(\"DM_NumLogChans=2\" +", "frame.reiconify() class calibrationPoint(Frame) : def __init__(self, master): self.bitValue = DoubleVar(0) self.torqueValue = DoubleVar(0)", "starts on counter = counter + 1 if line.find(\"DM_Start=\") != -1: break for", "Import numpy import sys import os from Tkinter import * import tkMessageBox from", "level window def exitWindow(self, frame): frame.withdraw() def reopenWindow(self, frame): frame.update() frame.reiconify() class calibrationPoint(Frame)", "command = partial(self.singlePlot,True), text = \"Plot\").pack(side = LEFT) def calibrate(self): #if len(self.pointList) is", "L1.pack(side=LEFT) portEntry = Entry(self.x, width = 5, textvariable = self.portVar) portEntry.pack(side=LEFT) L1 =", "self.yIntercept).pack(side = TOP) Button(b, command = partial(self.exitWindow,t), text = \"OK\").pack(side = BOTTOM) def", "If the processor clock has overflowed timeOffset = prevAdjusted # Then edit the", "b in self.bridges: ### Loop through list of sensors if b.checkVar.get(): ### if", "LLC: <NAME>\\t2016\", font = (\"Helvetica\",\"12\")).pack(side = BOTTOM) def addBridge(self): a = Bridge(self.ip.get(),0, master=self)", "= LEFT, padx = 
10, pady = 2) y = Entry(self, textvariable =", "y intercept from calibration point cloud def linReg(self): temp1 = [] temp2 =", "= \"Bit Value\", padx = 15).grid(column = 0,row = 0) Label(c, text =", "self.pointList: print(\"bit value: \"+ str(x.bitValue.get())) print(\"Torque Value value: \"+ str(x.torqueValue.get())) # Finds slope", "= \"Remove Bridge\", command = self.removeBridge, width = 18) self.bridgeRemove.pack(side = RIGHT) self.addBridge()", "calibrationPoint(a) temp.bitValue.set(x.bitValue.get()) temp.torqueValue.set(x.torqueValue.get()) self.pointList.append(temp) Button(master = d, command = partial(self.addPoint,a), text = \"Add", "self.pointList = [] #### Starts Writing to File def startLogging(self): print('Sampling system on", "\"Multi-Plot\", command = self.plotMultiple).pack(side = LEFT) self.bottomFrame = Frame(master=self,padx = 8, pady =", "RIGHT) Button(self.topFrame, text = \"Multi-Plot\", command = self.plotMultiple).pack(side = LEFT) self.bottomFrame = Frame(master=self,padx", "# Then edit the time offset value adjustedTime = int(fields[0])+timeOffset # Shift every", "### Simultaneously starts logging for all selected bridges def startLogging(self): if not tkMessageBox.askyesno(\"Start", "Label(b, text = \"Slope\", padx = 15).pack(side = TOP) Entry(b, textvariable = self.mSlope).pack(side", "= 8,pady = 8, bd = 2, relief = GROOVE) self.topFrame.pack(side = TOP,", "fileLog.write(\"DM_ChanType=SEQUENTIAL\" + n) fileLog.write(\"DM_ChanName=1\" + n) fileLog.write(\"DM_NumDims=2\" + n) fileLog.write(\"DM_DataMode=1\" + n) fileLog.write(\"DM_DataModeType=TIMHIS\"", "createWidgets(self) : x = Entry(self, textvariable = self.bitValue, width = 8) x.pack(side =", "### Necessary formatting for InField compatibility fileLog.write(\"DM_TestTitle=\" + n) fileLog.write(str(self.file_path) + n +", "linalg.lstsq(A.T,B.T)[0] m = round(float(w[0]),3) y = round(float(w[1]),3) self.mSlope.set(m) self.yIntercept.set(y) # Exits a top", "time.append(y[0]) 
torque.append(y[1]) if show: plt.plot(time,torque) plt.xlabel(\"Time (microseconds)\") plt.ylabel(\"Torque (inch-pounds)\") plt.title(\"Time vs. Torque\") plt.show()", "self.startButton.pack(side = LEFT) self.stopButton = Button(self.topFrame, text = \"Stop Logging\", command = self.stopLogging,", "card. # Usage: The program simultaneously reads multiple UDP streams and writes them", "is empty for i in xrange(3): self.addPoint(a) else: tempList = self.pointList # Store", "str(x.bitValue.get())) print(\"Torque Value value: \"+ str(x.torqueValue.get())) # Finds slope and y intercept from", "command = self.saveAs, text = \"Browse...\") browseButton.pack(side = LEFT) calibrateButton = Button(self.x, command", "point cloud def linReg(self): temp1 = [] temp2 = [] for x in", "wirelessly transmitted data from multiple sensors and saves it on the local SD", "###### Interface Initialization Frame.__init__(self.x,master, bd = 2, padx = 3, pady = 3)", "def linReg(self): temp1 = [] temp2 = [] for x in self.pointList: temp1.append(x.bitValue.get())", "y = round(float(w[1]),3) self.mSlope.set(m) self.yIntercept.set(y) # Exits a top level window def exitWindow(self,", "self.pointList # Store points in temporary list self.pointList = [] # Empty out", "instance plt.plot(xy[0],xy[1]) ### Show the plot plt.xlabel(\"Time (microseconds)\") plt.ylabel(\"Torque (inch-pounds)\") plt.title(\"Time vs. 
Torque\")", "temp1.append(x.bitValue.get()) temp2.append(x.torqueValue.get()) A = array([temp1,ones(len(temp1))]) B = array([temp2]) w = linalg.lstsq(A.T,B.T)[0] m =", "= self.bitValue, width = 8) x.pack(side = LEFT, padx = 10, pady =", "Entry(self, textvariable = self.bitValue, width = 8) x.pack(side = LEFT, padx = 10,", "= d, command = partial(self.addPoint,a), text = \"Add Point\").pack(side = LEFT,fill = X,", "= 15).pack(side = TOP) Entry(b, textvariable = self.yIntercept).pack(side = TOP) Button(b, command =", "18) self.startButton.pack(side = LEFT) self.stopButton = Button(self.topFrame, text = \"Stop Logging\", command =", "= Toplevel(self.x) # Open window t.wm_title(\"PORT: \" + str(self.portVar.get()) + \" Calibration\") a", "8) y.pack(side = LEFT, padx = 10, pady = 2) return ###### Running", "padx = 15).grid(column = 0,row = 0) Label(c, text = \"Torque (in-lbs)\", padx", "to the top of the window self.bridges.append(a) # Add the object to self.bridges", "socket.SOCK_DGRAM) # UDP sock.bind((self.ip, (self.portVar.get()))) ### File Setup fileLog = open(self.filePathVar.get(), \"wb\") ###", "padx = 8,pady = 8, bd = 2, relief = GROOVE) self.topFrame.pack(side =", "Collection\") self.startButton.configure(state = NORMAL) self.stopButton.configure(state = DISABLED) for p in self.processes: ### Iterate", "Entry(self.x,width = 35,textvariable = self.filePathVar, text = self.filePathVar.get()) fileEntry.pack(side=LEFT) browseButton = Button(self.x, command", "= \"Include\",variable = self.checkVar) check.pack(side=LEFT) L1 = Label(self.x, text = \" PORT\") L1.pack(side=LEFT)", "# Add the object to self.bridges def removeBridge(self): self.bridges.pop().x.pack_forget() ### Simultaneously starts logging", "Take the time on the very first packet and store it in to", "8, pady = 8, bd = 2, relief = GROOVE) self.bottomFrame.pack(side = BOTTOM,", "p.join() def plotMultiple(self): for b in self.bridges: if b.checkVar.get(): xy = b.singlePlot(False) ###", "3, pady = 3) 
self.x.pack(side=LEFT) self.createWidgets() ###### linear Calibration coefficents ###### Calibrated =", "+ n) fileLog.write(\"DM_DataMode=1\" + n) fileLog.write(\"DM_DataModeType=TIMHIS\" + n) fileLog.write(\"DM_AxisLabel.Dim1=[]\" + n) fileLog.write(\"DM_AxisLabel.Dim2=Torque\" +", "\"\" class SensorNetwork(Frame): def __init__(self,master=None): ### List of process objects for parallel computing", "the very first packet and store it in to timeOffset isFirst = False", "\"{} \\t {}\" def __init__(self,ip,port,master): ###### Tkinter Varibales self.x = Frame() self.isLogging =", "clock has overflowed timeOffset = prevAdjusted # Then edit the time offset value", "command = self.removeBridge, width = 18) self.bridgeRemove.pack(side = RIGHT) self.addBridge() # Initialize with", "= 8, bd = 2, relief = GROOVE) self.bottomFrame.pack(side = BOTTOM, fill =", "tempList: # Copy points over temp = calibrationPoint(a) temp.bitValue.set(x.bitValue.get()) temp.torqueValue.set(x.torqueValue.get()) self.pointList.append(temp) Button(master =", "temporary list self.pointList = [] # Empty out list for x in tempList:", "+ n) prevTime = int(fields[0]) prevAdjusted = adjustedTime def createWidgets(self): check = Checkbutton(self.x,text", "__init__(self,ip,port,master): ###### Tkinter Varibales self.x = Frame() self.isLogging = BooleanVar() self.checkVar = IntVar()", "y.pack(side = LEFT, padx = 10, pady = 2) return ###### Running code", "addBridge(self): a = Bridge(self.ip.get(),0, master=self) # Create new bridge object a.x.pack(side = TOP)", "= \" File\") L1.pack(side=LEFT) fileEntry = Entry(self.x,width = 35,textvariable = self.filePathVar, text =", "not 0: t = Toplevel(self.x) # Open window t.wm_title(\"PORT: \" + str(self.portVar.get()) +", "def stopLogging(self): print (\"Stopping Data Collection\") self.startButton.configure(state = NORMAL) self.stopButton.configure(state = DISABLED) for", "= DoubleVar(0) self.torqueValue = DoubleVar(0) Frame.__init__(self,master) self.pack(side = 
TOP) self.createWidgets() def createWidgets(self) :", "# Finds slope and y intercept from calibration point cloud def linReg(self): temp1", "Initialization Frame.__init__(self.x,master, bd = 2, padx = 3, pady = 3) self.x.pack(side=LEFT) self.createWidgets()", "###### linear Calibration coefficents ###### Calibrated = mSlope * Uncalibrated + yIntercept self.mSlope", "List of bridges self.bridges = [] ###### Tkinter variables self.ip = StringVar() self.ip.set(\"0.0.0.0\")", "self.stopLogging, width = 18, state = DISABLED) self.stopButton.pack(side = RIGHT) Button(self.topFrame, text =", "### if box is checked p = Process(target = b.startLogging) self.processes.append(p) p.start() def", "###### Calibrated = mSlope * Uncalibrated + yIntercept self.mSlope = DoubleVar() self.mSlope.set(1) self.yIntercept", "master): self.bitValue = DoubleVar(0) self.torqueValue = DoubleVar(0) Frame.__init__(self,master) self.pack(side = TOP) self.createWidgets() def", "command = partial(self.exitWindow,t), text = \"OK\").pack(side = BOTTOM) def singlePlot(self, show): f =", "for x in self.pointList: print(\"bit value: \"+ str(x.bitValue.get())) print(\"Torque Value value: \"+ str(x.torqueValue.get()))", "n) fileLog.write(\"DM_AxisUnits.Dim2=us\" + n) fileLog.write(\"DM_LogicalChan=2\" + n) fileLog.write(\"DM_ChanType=SEQUENTIAL\" + n) fileLog.write(\"DM_ChanName=1\" + n)", "frame.update() frame.reiconify() class calibrationPoint(Frame) : def __init__(self, master): self.bitValue = DoubleVar(0) self.torqueValue =", "with one bridge Label(self.bottomFrame, text = \"Neapco Components LLC: <NAME>\\t2016\", font = (\"Helvetica\",\"12\")).pack(side", "###### Running code if __name__ == '__main__': root = Tk() root.wm_title(\"Gage Logger\") app", "self.mSlope = DoubleVar() self.mSlope.set(1) self.yIntercept = DoubleVar(0) self.pointCount = 0 self.bitPoints = []", "= round(float(w[0]),3) y = round(float(w[1]),3) self.mSlope.set(m) self.yIntercept.set(y) # Exits a top level window", "UDP streams 
and writes them to \".txt\" files. # The output files are", "torque.append(y[1]) if show: plt.plot(time,torque) plt.xlabel(\"Time (microseconds)\") plt.ylabel(\"Torque (inch-pounds)\") plt.title(\"Time vs. Torque\") plt.show() return", "from Tkinter import * import tkMessageBox from tkFileDialog import asksaveasfilename import datetime import", "= \"Multi-Plot\", command = self.plotMultiple).pack(side = LEFT) self.bottomFrame = Frame(master=self,padx = 8, pady", "b = Frame(t) b.pack(side = RIGHT) c = Frame(a) c.pack(side = TOP) d", "= d, command = partial(self.removePoint), text = \"Remove Point\").pack(side = LEFT, fill =", "self.mSlope.set(1) self.yIntercept = DoubleVar(0) self.pointCount = 0 self.bitPoints = [] self.torquePoints = []", "padx = 10, pady = 2) return ###### Running code if __name__ ==", "the list of calibration points is empty for i in xrange(3): self.addPoint(a) else:", "PORT\") L1.pack(side=LEFT) portEntry = Entry(self.x, width = 5, textvariable = self.portVar) portEntry.pack(side=LEFT) L1", "# Empty out list for x in tempList: # Copy points over temp", "out list for x in tempList: # Copy points over temp = calibrationPoint(a)", "= partial(self.exitWindow,t), text = \"OK\").pack(side = BOTTOM) def singlePlot(self, show): f = open(self.filePathVar.get())", "2) y = Entry(self, textvariable = self.torqueValue, width = 8) y.pack(side = LEFT,", "5, textvariable = self.portVar) portEntry.pack(side=LEFT) L1 = Label(self.x, text = \" File\") L1.pack(side=LEFT)", "every subsequent packet by the timeOffset fileLog.write(\"{:.6f}\".format(float(adjustedTime)/1000000) + '\\t' + str(calibratedData) + n)", "# If the list of calibration points is empty for i in xrange(3):", "b.singlePlot(False) ### Call the singlePlot method for each instance plt.plot(xy[0],xy[1]) ### Show the", "self.bottomFrame = Frame(master=self,padx = 8, pady = 8, bd = 2, relief =", "= self.mSlope).pack(side = TOP) Label(b, text = \"Y Intercept\", padx = 15).pack(side =", "files are 
formatted to be imported into the InField data analysis software. import", "def reopenWindow(self, frame): frame.update() frame.reiconify() class calibrationPoint(Frame) : def __init__(self, master): self.bitValue =", "Checkbutton(self.x,text = \"Include\",variable = self.checkVar) check.pack(side=LEFT) L1 = Label(self.x, text = \" PORT\")", "text = \" PORT\") L1.pack(side=LEFT) portEntry = Entry(self.x, width = 5, textvariable =", "= [] temp2 = [] for x in self.pointList: temp1.append(x.bitValue.get()) temp2.append(x.torqueValue.get()) A =", "= TOP) Entry(b, textvariable = self.yIntercept).pack(side = TOP) Button(b, command = partial(self.exitWindow,t), text" ]
[ ".log import (debug, decrease_verbosity, error, fatal, increase_verbosity, info) from .neocities import Neocities from", "has {local_filetree.number_of_files()} file(s)\" f\" and {local_filetree.number_of_directories()} dir(s).\" ) info(\"Fetching remote file tree...\") remote_filetree", "\"\"\"Program entry-point.\"\"\" cmdline_opts = cmdline.parse(sys.argv[1:]) if cmdline_opts.quietness > 0: for _ in range(cmdline_opts.quietness):", "cmdline_opts.quietness > 0: for _ in range(cmdline_opts.quietness): decrease_verbosity() elif cmdline_opts.quietness < 0: for", "f\" file(s) and {remote_filetree.number_of_directories()} dir(s).\" ) info(\"Comparing file trees...\") applied_actions = 0 for", "reverse=True): info(f'Deleting remote empty directory \"{empty_dir}\"') if not cmdline_opts.dry_run: client.delete(empty_dir) info(f'Finished sync for", "= load_config_file(cmdline_opts.config_file) except FileNotFoundError: fatal(f'Config file \"{cmdline_opts.config_file}\" not found. Run again with \"--help\"", "local directories with neocities.org sites.\"\"\" import os import sys from . import cmdline", "import sys from . import cmdline from . import local from .config import", "from . import cmdline from . 
import local from .config import load_config_file from", "error(f\"Error while syncing: {e}\") exit(1) if not cmdline_opts.dry_run: info(f\"Applied {applied_actions} action(s).\") else: info(f\"Would", "{applied_actions} action(s).\") else: info(f\"Would apply {applied_actions} action(s).\") if site_conf.remove_empty_dirs: info(\"Searching for empty directories...\")", "\"{empty_dir}\"') if not cmdline_opts.dry_run: client.delete(empty_dir) info(f'Finished sync for site \"{site}\".') if __name__ ==", "not cmdline_opts.dry_run: client.upload(action.path) applied_actions += 1 elif isinstance(action, DeleteRemote): info(f'Deleting remote file \"{action.path}\":", "client = Neocities(site_conf.api_key) with Pushd(os.path.expanduser(site_conf.root_dir)): info(f'Starting sync for site \"{site}\".') info(\"Listing local file", "def main(): \"\"\"Program entry-point.\"\"\" cmdline_opts = cmdline.parse(sys.argv[1:]) if cmdline_opts.quietness > 0: for _", "action in sync_actions(local_filetree, remote_filetree): try: if isinstance(action, UpdateRemote): info(f'Updating remote file \"{action.path}\": {action.reason}.')", ".ignore_files import IgnoreFiles from .log import (debug, decrease_verbosity, error, fatal, increase_verbosity, info) from", "file(s)\" f\" and {local_filetree.number_of_directories()} dir(s).\" ) info(\"Fetching remote file tree...\") remote_filetree = client.list()", "elif cmdline_opts.quietness < 0: for _ in range(-cmdline_opts.quietness): increase_verbosity() try: conf = load_config_file(cmdline_opts.config_file)", "info( f\"Remote file tree has {remote_filetree.number_of_files()}\" f\" file(s) and {remote_filetree.number_of_directories()} dir(s).\" ) info(\"Comparing", "from .sync_actions import DeleteRemote, DoNothing, UpdateRemote, sync_actions from .utils import Pushd def main():", "raise RuntimeError(f\"Unknown action {action.__class__.__name__}.\") except Exception as e: # noqa: B902 error(f\"Error while", "cmdline_opts.dry_run: info(f\"Applied 
{applied_actions} action(s).\") else: info(f\"Would apply {applied_actions} action(s).\") if site_conf.remove_empty_dirs: info(\"Searching for", "= client.list() info( f\"Remote file tree has {remote_filetree.number_of_files()}\" f\" file(s) and {remote_filetree.number_of_directories()} dir(s).\"", "from .neocities import Neocities from .sync_actions import DeleteRemote, DoNothing, UpdateRemote, sync_actions from .utils", "> 0: for _ in range(cmdline_opts.quietness): decrease_verbosity() elif cmdline_opts.quietness < 0: for _", "+= 1 elif isinstance(action, DeleteRemote): info(f'Deleting remote file \"{action.path}\": {action.reason}.') if not cmdline_opts.dry_run:", "for site \"{site}\".') info(\"Listing local file tree...\") local_filetree = local.filetree(\".\") local_filetree = IgnoreFiles(site_conf).filter(local_filetree)", "in sorted(empty_directories, reverse=True): info(f'Deleting remote empty directory \"{empty_dir}\"') if not cmdline_opts.dry_run: client.delete(empty_dir) info(f'Finished", "tree...\") local_filetree = local.filetree(\".\") local_filetree = IgnoreFiles(site_conf).filter(local_filetree) info( f\"Local file tree has {local_filetree.number_of_files()}", "load_config_file from .ignore_files import IgnoreFiles from .log import (debug, decrease_verbosity, error, fatal, increase_verbosity,", "decrease_verbosity() elif cmdline_opts.quietness < 0: for _ in range(-cmdline_opts.quietness): increase_verbosity() try: conf =", "Pushd(os.path.expanduser(site_conf.root_dir)): info(f'Starting sync for site \"{site}\".') info(\"Listing local file tree...\") local_filetree = local.filetree(\".\")", "{action.reason}.') else: raise RuntimeError(f\"Unknown action {action.__class__.__name__}.\") except Exception as e: # noqa: B902", "remote_filetree = client.list() empty_directories = remote_filetree.list_empty_directories() info(f\"Found {len(empty_directories)} empty dir(s).\") for empty_dir in", "DeleteRemote, DoNothing, UpdateRemote, sync_actions from .utils 
import Pushd def main(): \"\"\"Program entry-point.\"\"\" cmdline_opts", "for _ in range(cmdline_opts.quietness): decrease_verbosity() elif cmdline_opts.quietness < 0: for _ in range(-cmdline_opts.quietness):", "local file tree...\") local_filetree = local.filetree(\".\") local_filetree = IgnoreFiles(site_conf).filter(local_filetree) info( f\"Local file tree", "range(cmdline_opts.quietness): decrease_verbosity() elif cmdline_opts.quietness < 0: for _ in range(-cmdline_opts.quietness): increase_verbosity() try: conf", "file \"{action.path}\": {action.reason}.') if not cmdline_opts.dry_run: client.delete(action.path) applied_actions += 1 elif isinstance(action, DoNothing):", "info(f\"Would apply {applied_actions} action(s).\") if site_conf.remove_empty_dirs: info(\"Searching for empty directories...\") remote_filetree = client.list()", "and {local_filetree.number_of_directories()} dir(s).\" ) info(\"Fetching remote file tree...\") remote_filetree = client.list() info( f\"Remote", "syncing: {e}\") exit(1) if not cmdline_opts.dry_run: info(f\"Applied {applied_actions} action(s).\") else: info(f\"Would apply {applied_actions}", "RuntimeError(f\"Unknown action {action.__class__.__name__}.\") except Exception as e: # noqa: B902 error(f\"Error while syncing:", "dir(s).\" ) info(\"Comparing file trees...\") applied_actions = 0 for action in sync_actions(local_filetree, remote_filetree):", "{applied_actions} action(s).\") if site_conf.remove_empty_dirs: info(\"Searching for empty directories...\") remote_filetree = client.list() empty_directories =", "= client.list() empty_directories = remote_filetree.list_empty_directories() info(f\"Found {len(empty_directories)} empty dir(s).\") for empty_dir in sorted(empty_directories,", "again with \"--help\" for more info.') exit(1) for site, site_conf in conf.items(): client", "local_filetree = local.filetree(\".\") local_filetree = IgnoreFiles(site_conf).filter(local_filetree) info( f\"Local file tree has 
{local_filetree.number_of_files()} file(s)\"", "for more info.') exit(1) for site, site_conf in conf.items(): client = Neocities(site_conf.api_key) with", "for action in sync_actions(local_filetree, remote_filetree): try: if isinstance(action, UpdateRemote): info(f'Updating remote file \"{action.path}\":", "directories...\") remote_filetree = client.list() empty_directories = remote_filetree.list_empty_directories() info(f\"Found {len(empty_directories)} empty dir(s).\") for empty_dir", "info(\"Fetching remote file tree...\") remote_filetree = client.list() info( f\"Remote file tree has {remote_filetree.number_of_files()}\"", "if not cmdline_opts.dry_run: client.delete(empty_dir) info(f'Finished sync for site \"{site}\".') if __name__ == \"__main__\":", "FileNotFoundError: fatal(f'Config file \"{cmdline_opts.config_file}\" not found. Run again with \"--help\" for more info.')", "sys from . import cmdline from . import local from .config import load_config_file", "remote_filetree = client.list() info( f\"Remote file tree has {remote_filetree.number_of_files()}\" f\" file(s) and {remote_filetree.number_of_directories()}", "+= 1 elif isinstance(action, DoNothing): debug(f'Skipping \"{action.path}\": {action.reason}.') else: raise RuntimeError(f\"Unknown action {action.__class__.__name__}.\")", "increase_verbosity() try: conf = load_config_file(cmdline_opts.config_file) except FileNotFoundError: fatal(f'Config file \"{cmdline_opts.config_file}\" not found. 
Run", "fatal, increase_verbosity, info) from .neocities import Neocities from .sync_actions import DeleteRemote, DoNothing, UpdateRemote,", "error, fatal, increase_verbosity, info) from .neocities import Neocities from .sync_actions import DeleteRemote, DoNothing,", "for empty_dir in sorted(empty_directories, reverse=True): info(f'Deleting remote empty directory \"{empty_dir}\"') if not cmdline_opts.dry_run:", "import load_config_file from .ignore_files import IgnoreFiles from .log import (debug, decrease_verbosity, error, fatal,", "info(f'Starting sync for site \"{site}\".') info(\"Listing local file tree...\") local_filetree = local.filetree(\".\") local_filetree", "with Pushd(os.path.expanduser(site_conf.root_dir)): info(f'Starting sync for site \"{site}\".') info(\"Listing local file tree...\") local_filetree =", "trees...\") applied_actions = 0 for action in sync_actions(local_filetree, remote_filetree): try: if isinstance(action, UpdateRemote):", "empty_dir in sorted(empty_directories, reverse=True): info(f'Deleting remote empty directory \"{empty_dir}\"') if not cmdline_opts.dry_run: client.delete(empty_dir)", "tree...\") remote_filetree = client.list() info( f\"Remote file tree has {remote_filetree.number_of_files()}\" f\" file(s) and", "IgnoreFiles(site_conf).filter(local_filetree) info( f\"Local file tree has {local_filetree.number_of_files()} file(s)\" f\" and {local_filetree.number_of_directories()} dir(s).\" )", "IgnoreFiles from .log import (debug, decrease_verbosity, error, fatal, increase_verbosity, info) from .neocities import", "Pushd def main(): \"\"\"Program entry-point.\"\"\" cmdline_opts = cmdline.parse(sys.argv[1:]) if cmdline_opts.quietness > 0: for", "\"{action.path}\": {action.reason}.') if not cmdline_opts.dry_run: client.delete(action.path) applied_actions += 1 elif isinstance(action, DoNothing): debug(f'Skipping", "cmdline_opts.quietness < 0: for _ in range(-cmdline_opts.quietness): increase_verbosity() try: conf = 
load_config_file(cmdline_opts.config_file) except", "info(f\"Applied {applied_actions} action(s).\") else: info(f\"Would apply {applied_actions} action(s).\") if site_conf.remove_empty_dirs: info(\"Searching for empty", "from .utils import Pushd def main(): \"\"\"Program entry-point.\"\"\" cmdline_opts = cmdline.parse(sys.argv[1:]) if cmdline_opts.quietness", "applied_actions += 1 elif isinstance(action, DoNothing): debug(f'Skipping \"{action.path}\": {action.reason}.') else: raise RuntimeError(f\"Unknown action", "sync_actions from .utils import Pushd def main(): \"\"\"Program entry-point.\"\"\" cmdline_opts = cmdline.parse(sys.argv[1:]) if", "noqa: B902 error(f\"Error while syncing: {e}\") exit(1) if not cmdline_opts.dry_run: info(f\"Applied {applied_actions} action(s).\")", "cmdline_opts = cmdline.parse(sys.argv[1:]) if cmdline_opts.quietness > 0: for _ in range(cmdline_opts.quietness): decrease_verbosity() elif", "DoNothing, UpdateRemote, sync_actions from .utils import Pushd def main(): \"\"\"Program entry-point.\"\"\" cmdline_opts =", "= cmdline.parse(sys.argv[1:]) if cmdline_opts.quietness > 0: for _ in range(cmdline_opts.quietness): decrease_verbosity() elif cmdline_opts.quietness", "< 0: for _ in range(-cmdline_opts.quietness): increase_verbosity() try: conf = load_config_file(cmdline_opts.config_file) except FileNotFoundError:", "in range(-cmdline_opts.quietness): increase_verbosity() try: conf = load_config_file(cmdline_opts.config_file) except FileNotFoundError: fatal(f'Config file \"{cmdline_opts.config_file}\" not", "sites.\"\"\" import os import sys from . import cmdline from . 
import local", "for _ in range(-cmdline_opts.quietness): increase_verbosity() try: conf = load_config_file(cmdline_opts.config_file) except FileNotFoundError: fatal(f'Config file", "info(f'Deleting remote file \"{action.path}\": {action.reason}.') if not cmdline_opts.dry_run: client.delete(action.path) applied_actions += 1 elif", "{remote_filetree.number_of_directories()} dir(s).\" ) info(\"Comparing file trees...\") applied_actions = 0 for action in sync_actions(local_filetree,", "\"{action.path}\": {action.reason}.') else: raise RuntimeError(f\"Unknown action {action.__class__.__name__}.\") except Exception as e: # noqa:", "e: # noqa: B902 error(f\"Error while syncing: {e}\") exit(1) if not cmdline_opts.dry_run: info(f\"Applied", "not cmdline_opts.dry_run: info(f\"Applied {applied_actions} action(s).\") else: info(f\"Would apply {applied_actions} action(s).\") if site_conf.remove_empty_dirs: info(\"Searching", "info(f\"Found {len(empty_directories)} empty dir(s).\") for empty_dir in sorted(empty_directories, reverse=True): info(f'Deleting remote empty directory", "action(s).\") if site_conf.remove_empty_dirs: info(\"Searching for empty directories...\") remote_filetree = client.list() empty_directories = remote_filetree.list_empty_directories()", "remote file tree...\") remote_filetree = client.list() info( f\"Remote file tree has {remote_filetree.number_of_files()}\" f\"", "file tree...\") remote_filetree = client.list() info( f\"Remote file tree has {remote_filetree.number_of_files()}\" f\" file(s)", "file tree has {remote_filetree.number_of_files()}\" f\" file(s) and {remote_filetree.number_of_directories()} dir(s).\" ) info(\"Comparing file trees...\")", "empty directory \"{empty_dir}\"') if not cmdline_opts.dry_run: client.delete(empty_dir) info(f'Finished sync for site \"{site}\".') if", "empty dir(s).\") for empty_dir in sorted(empty_directories, reverse=True): info(f'Deleting remote empty directory \"{empty_dir}\"') if", "not cmdline_opts.dry_run: 
client.delete(empty_dir) info(f'Finished sync for site \"{site}\".') if __name__ == \"__main__\": main()", "fatal(f'Config file \"{cmdline_opts.config_file}\" not found. Run again with \"--help\" for more info.') exit(1)", "remote_filetree.list_empty_directories() info(f\"Found {len(empty_directories)} empty dir(s).\") for empty_dir in sorted(empty_directories, reverse=True): info(f'Deleting remote empty", "file(s) and {remote_filetree.number_of_directories()} dir(s).\" ) info(\"Comparing file trees...\") applied_actions = 0 for action", "from .config import load_config_file from .ignore_files import IgnoreFiles from .log import (debug, decrease_verbosity,", "client.list() info( f\"Remote file tree has {remote_filetree.number_of_files()}\" f\" file(s) and {remote_filetree.number_of_directories()} dir(s).\" )", "{e}\") exit(1) if not cmdline_opts.dry_run: info(f\"Applied {applied_actions} action(s).\") else: info(f\"Would apply {applied_actions} action(s).\")", "with neocities.org sites.\"\"\" import os import sys from . import cmdline from .", "import cmdline from . 
import local from .config import load_config_file from .ignore_files import", "info.') exit(1) for site, site_conf in conf.items(): client = Neocities(site_conf.api_key) with Pushd(os.path.expanduser(site_conf.root_dir)): info(f'Starting", "= 0 for action in sync_actions(local_filetree, remote_filetree): try: if isinstance(action, UpdateRemote): info(f'Updating remote", ".sync_actions import DeleteRemote, DoNothing, UpdateRemote, sync_actions from .utils import Pushd def main(): \"\"\"Program", "Run again with \"--help\" for more info.') exit(1) for site, site_conf in conf.items():", "site_conf in conf.items(): client = Neocities(site_conf.api_key) with Pushd(os.path.expanduser(site_conf.root_dir)): info(f'Starting sync for site \"{site}\".')", "in conf.items(): client = Neocities(site_conf.api_key) with Pushd(os.path.expanduser(site_conf.root_dir)): info(f'Starting sync for site \"{site}\".') info(\"Listing", "elif isinstance(action, DoNothing): debug(f'Skipping \"{action.path}\": {action.reason}.') else: raise RuntimeError(f\"Unknown action {action.__class__.__name__}.\") except Exception", "{remote_filetree.number_of_files()}\" f\" file(s) and {remote_filetree.number_of_directories()} dir(s).\" ) info(\"Comparing file trees...\") applied_actions = 0", "if isinstance(action, UpdateRemote): info(f'Updating remote file \"{action.path}\": {action.reason}.') if not cmdline_opts.dry_run: client.upload(action.path) applied_actions", "if site_conf.remove_empty_dirs: info(\"Searching for empty directories...\") remote_filetree = client.list() empty_directories = remote_filetree.list_empty_directories() info(f\"Found", "empty directories...\") remote_filetree = client.list() empty_directories = remote_filetree.list_empty_directories() info(f\"Found {len(empty_directories)} empty dir(s).\") for", "_ in range(cmdline_opts.quietness): decrease_verbosity() elif cmdline_opts.quietness < 0: for _ in range(-cmdline_opts.quietness): increase_verbosity()", "= 
Neocities(site_conf.api_key) with Pushd(os.path.expanduser(site_conf.root_dir)): info(f'Starting sync for site \"{site}\".') info(\"Listing local file tree...\")", "applied_actions = 0 for action in sync_actions(local_filetree, remote_filetree): try: if isinstance(action, UpdateRemote): info(f'Updating", "while syncing: {e}\") exit(1) if not cmdline_opts.dry_run: info(f\"Applied {applied_actions} action(s).\") else: info(f\"Would apply", "try: conf = load_config_file(cmdline_opts.config_file) except FileNotFoundError: fatal(f'Config file \"{cmdline_opts.config_file}\" not found. Run again", "info(\"Searching for empty directories...\") remote_filetree = client.list() empty_directories = remote_filetree.list_empty_directories() info(f\"Found {len(empty_directories)} empty", "file \"{cmdline_opts.config_file}\" not found. Run again with \"--help\" for more info.') exit(1) for", ") info(\"Comparing file trees...\") applied_actions = 0 for action in sync_actions(local_filetree, remote_filetree): try:", "\"\"\"Sync local directories with neocities.org sites.\"\"\" import os import sys from . 
import", "site_conf.remove_empty_dirs: info(\"Searching for empty directories...\") remote_filetree = client.list() empty_directories = remote_filetree.list_empty_directories() info(f\"Found {len(empty_directories)}", "import DeleteRemote, DoNothing, UpdateRemote, sync_actions from .utils import Pushd def main(): \"\"\"Program entry-point.\"\"\"", "info(f'Updating remote file \"{action.path}\": {action.reason}.') if not cmdline_opts.dry_run: client.upload(action.path) applied_actions += 1 elif", "\"{site}\".') info(\"Listing local file tree...\") local_filetree = local.filetree(\".\") local_filetree = IgnoreFiles(site_conf).filter(local_filetree) info( f\"Local", "isinstance(action, UpdateRemote): info(f'Updating remote file \"{action.path}\": {action.reason}.') if not cmdline_opts.dry_run: client.upload(action.path) applied_actions +=", "for empty directories...\") remote_filetree = client.list() empty_directories = remote_filetree.list_empty_directories() info(f\"Found {len(empty_directories)} empty dir(s).\")", "UpdateRemote, sync_actions from .utils import Pushd def main(): \"\"\"Program entry-point.\"\"\" cmdline_opts = cmdline.parse(sys.argv[1:])", "directories with neocities.org sites.\"\"\" import os import sys from . 
import cmdline from", "range(-cmdline_opts.quietness): increase_verbosity() try: conf = load_config_file(cmdline_opts.config_file) except FileNotFoundError: fatal(f'Config file \"{cmdline_opts.config_file}\" not found.", "import Neocities from .sync_actions import DeleteRemote, DoNothing, UpdateRemote, sync_actions from .utils import Pushd", "f\"Local file tree has {local_filetree.number_of_files()} file(s)\" f\" and {local_filetree.number_of_directories()} dir(s).\" ) info(\"Fetching remote", "client.delete(action.path) applied_actions += 1 elif isinstance(action, DoNothing): debug(f'Skipping \"{action.path}\": {action.reason}.') else: raise RuntimeError(f\"Unknown", "0 for action in sync_actions(local_filetree, remote_filetree): try: if isinstance(action, UpdateRemote): info(f'Updating remote file", "debug(f'Skipping \"{action.path}\": {action.reason}.') else: raise RuntimeError(f\"Unknown action {action.__class__.__name__}.\") except Exception as e: #", "cmdline_opts.dry_run: client.upload(action.path) applied_actions += 1 elif isinstance(action, DeleteRemote): info(f'Deleting remote file \"{action.path}\": {action.reason}.')", "= local.filetree(\".\") local_filetree = IgnoreFiles(site_conf).filter(local_filetree) info( f\"Local file tree has {local_filetree.number_of_files()} file(s)\" f\"", "file tree has {local_filetree.number_of_files()} file(s)\" f\" and {local_filetree.number_of_directories()} dir(s).\" ) info(\"Fetching remote file", "if cmdline_opts.quietness > 0: for _ in range(cmdline_opts.quietness): decrease_verbosity() elif cmdline_opts.quietness < 0:", "conf.items(): client = Neocities(site_conf.api_key) with Pushd(os.path.expanduser(site_conf.root_dir)): info(f'Starting sync for site \"{site}\".') info(\"Listing local", "Exception as e: # noqa: B902 error(f\"Error while syncing: {e}\") exit(1) if not", "remote empty directory \"{empty_dir}\"') if not cmdline_opts.dry_run: client.delete(empty_dir) info(f'Finished sync for site \"{site}\".')", 
"info( f\"Local file tree has {local_filetree.number_of_files()} file(s)\" f\" and {local_filetree.number_of_directories()} dir(s).\" ) info(\"Fetching", "0: for _ in range(cmdline_opts.quietness): decrease_verbosity() elif cmdline_opts.quietness < 0: for _ in", "client.upload(action.path) applied_actions += 1 elif isinstance(action, DeleteRemote): info(f'Deleting remote file \"{action.path}\": {action.reason}.') if", "1 elif isinstance(action, DeleteRemote): info(f'Deleting remote file \"{action.path}\": {action.reason}.') if not cmdline_opts.dry_run: client.delete(action.path)", "else: raise RuntimeError(f\"Unknown action {action.__class__.__name__}.\") except Exception as e: # noqa: B902 error(f\"Error", "= IgnoreFiles(site_conf).filter(local_filetree) info( f\"Local file tree has {local_filetree.number_of_files()} file(s)\" f\" and {local_filetree.number_of_directories()} dir(s).\"", "os import sys from . import cmdline from . import local from .config", "DeleteRemote): info(f'Deleting remote file \"{action.path}\": {action.reason}.') if not cmdline_opts.dry_run: client.delete(action.path) applied_actions += 1", "from . 
import local from .config import load_config_file from .ignore_files import IgnoreFiles from", "0: for _ in range(-cmdline_opts.quietness): increase_verbosity() try: conf = load_config_file(cmdline_opts.config_file) except FileNotFoundError: fatal(f'Config", "as e: # noqa: B902 error(f\"Error while syncing: {e}\") exit(1) if not cmdline_opts.dry_run:", "apply {applied_actions} action(s).\") if site_conf.remove_empty_dirs: info(\"Searching for empty directories...\") remote_filetree = client.list() empty_directories", ".config import load_config_file from .ignore_files import IgnoreFiles from .log import (debug, decrease_verbosity, error,", "sync for site \"{site}\".') info(\"Listing local file tree...\") local_filetree = local.filetree(\".\") local_filetree =", "{action.reason}.') if not cmdline_opts.dry_run: client.delete(action.path) applied_actions += 1 elif isinstance(action, DoNothing): debug(f'Skipping \"{action.path}\":", "exit(1) if not cmdline_opts.dry_run: info(f\"Applied {applied_actions} action(s).\") else: info(f\"Would apply {applied_actions} action(s).\") if", "conf = load_config_file(cmdline_opts.config_file) except FileNotFoundError: fatal(f'Config file \"{cmdline_opts.config_file}\" not found. Run again with", "import os import sys from . import cmdline from . 
import local from", "import local from .config import load_config_file from .ignore_files import IgnoreFiles from .log import", "and {remote_filetree.number_of_directories()} dir(s).\" ) info(\"Comparing file trees...\") applied_actions = 0 for action in", "try: if isinstance(action, UpdateRemote): info(f'Updating remote file \"{action.path}\": {action.reason}.') if not cmdline_opts.dry_run: client.upload(action.path)", "{len(empty_directories)} empty dir(s).\") for empty_dir in sorted(empty_directories, reverse=True): info(f'Deleting remote empty directory \"{empty_dir}\"')", "if not cmdline_opts.dry_run: client.delete(action.path) applied_actions += 1 elif isinstance(action, DoNothing): debug(f'Skipping \"{action.path}\": {action.reason}.')", "from .ignore_files import IgnoreFiles from .log import (debug, decrease_verbosity, error, fatal, increase_verbosity, info)", "in sync_actions(local_filetree, remote_filetree): try: if isinstance(action, UpdateRemote): info(f'Updating remote file \"{action.path}\": {action.reason}.') if", "site \"{site}\".') info(\"Listing local file tree...\") local_filetree = local.filetree(\".\") local_filetree = IgnoreFiles(site_conf).filter(local_filetree) info(", "if not cmdline_opts.dry_run: client.upload(action.path) applied_actions += 1 elif isinstance(action, DeleteRemote): info(f'Deleting remote file", "if not cmdline_opts.dry_run: info(f\"Applied {applied_actions} action(s).\") else: info(f\"Would apply {applied_actions} action(s).\") if site_conf.remove_empty_dirs:", "decrease_verbosity, error, fatal, increase_verbosity, info) from .neocities import Neocities from .sync_actions import DeleteRemote,", "DoNothing): debug(f'Skipping \"{action.path}\": {action.reason}.') else: raise RuntimeError(f\"Unknown action {action.__class__.__name__}.\") except Exception as e:", "remote file \"{action.path}\": {action.reason}.') if not cmdline_opts.dry_run: client.delete(action.path) applied_actions += 1 elif isinstance(action,", "directory 
\"{empty_dir}\"') if not cmdline_opts.dry_run: client.delete(empty_dir) info(f'Finished sync for site \"{site}\".') if __name__", "has {remote_filetree.number_of_files()}\" f\" file(s) and {remote_filetree.number_of_directories()} dir(s).\" ) info(\"Comparing file trees...\") applied_actions =", "sorted(empty_directories, reverse=True): info(f'Deleting remote empty directory \"{empty_dir}\"') if not cmdline_opts.dry_run: client.delete(empty_dir) info(f'Finished sync", "remote_filetree): try: if isinstance(action, UpdateRemote): info(f'Updating remote file \"{action.path}\": {action.reason}.') if not cmdline_opts.dry_run:", "more info.') exit(1) for site, site_conf in conf.items(): client = Neocities(site_conf.api_key) with Pushd(os.path.expanduser(site_conf.root_dir)):", "\"{action.path}\": {action.reason}.') if not cmdline_opts.dry_run: client.upload(action.path) applied_actions += 1 elif isinstance(action, DeleteRemote): info(f'Deleting", "action {action.__class__.__name__}.\") except Exception as e: # noqa: B902 error(f\"Error while syncing: {e}\")", "in range(cmdline_opts.quietness): decrease_verbosity() elif cmdline_opts.quietness < 0: for _ in range(-cmdline_opts.quietness): increase_verbosity() try:", "f\"Remote file tree has {remote_filetree.number_of_files()}\" f\" file(s) and {remote_filetree.number_of_directories()} dir(s).\" ) info(\"Comparing file", "B902 error(f\"Error while syncing: {e}\") exit(1) if not cmdline_opts.dry_run: info(f\"Applied {applied_actions} action(s).\") else:", "# noqa: B902 error(f\"Error while syncing: {e}\") exit(1) if not cmdline_opts.dry_run: info(f\"Applied {applied_actions}", "except Exception as e: # noqa: B902 error(f\"Error while syncing: {e}\") exit(1) if", "not cmdline_opts.dry_run: client.delete(action.path) applied_actions += 1 elif isinstance(action, DoNothing): debug(f'Skipping \"{action.path}\": {action.reason}.') else:", ".utils import Pushd def main(): \"\"\"Program entry-point.\"\"\" cmdline_opts = 
cmdline.parse(sys.argv[1:]) if cmdline_opts.quietness >", "1 elif isinstance(action, DoNothing): debug(f'Skipping \"{action.path}\": {action.reason}.') else: raise RuntimeError(f\"Unknown action {action.__class__.__name__}.\") except", "info(\"Comparing file trees...\") applied_actions = 0 for action in sync_actions(local_filetree, remote_filetree): try: if", "f\" and {local_filetree.number_of_directories()} dir(s).\" ) info(\"Fetching remote file tree...\") remote_filetree = client.list() info(", "\"{cmdline_opts.config_file}\" not found. Run again with \"--help\" for more info.') exit(1) for site,", ".neocities import Neocities from .sync_actions import DeleteRemote, DoNothing, UpdateRemote, sync_actions from .utils import", "Neocities from .sync_actions import DeleteRemote, DoNothing, UpdateRemote, sync_actions from .utils import Pushd def", "found. Run again with \"--help\" for more info.') exit(1) for site, site_conf in", "tree has {remote_filetree.number_of_files()}\" f\" file(s) and {remote_filetree.number_of_directories()} dir(s).\" ) info(\"Comparing file trees...\") applied_actions", "file trees...\") applied_actions = 0 for action in sync_actions(local_filetree, remote_filetree): try: if isinstance(action,", "entry-point.\"\"\" cmdline_opts = cmdline.parse(sys.argv[1:]) if cmdline_opts.quietness > 0: for _ in range(cmdline_opts.quietness): decrease_verbosity()", "sync_actions(local_filetree, remote_filetree): try: if isinstance(action, UpdateRemote): info(f'Updating remote file \"{action.path}\": {action.reason}.') if not", "applied_actions += 1 elif isinstance(action, DeleteRemote): info(f'Deleting remote file \"{action.path}\": {action.reason}.') if not", ") info(\"Fetching remote file tree...\") remote_filetree = client.list() info( f\"Remote file tree has", "else: info(f\"Would apply {applied_actions} action(s).\") if site_conf.remove_empty_dirs: info(\"Searching for empty directories...\") remote_filetree =", "remote file \"{action.path}\": 
{action.reason}.') if not cmdline_opts.dry_run: client.upload(action.path) applied_actions += 1 elif isinstance(action,", "info) from .neocities import Neocities from .sync_actions import DeleteRemote, DoNothing, UpdateRemote, sync_actions from", "info(\"Listing local file tree...\") local_filetree = local.filetree(\".\") local_filetree = IgnoreFiles(site_conf).filter(local_filetree) info( f\"Local file", "exit(1) for site, site_conf in conf.items(): client = Neocities(site_conf.api_key) with Pushd(os.path.expanduser(site_conf.root_dir)): info(f'Starting sync", "Neocities(site_conf.api_key) with Pushd(os.path.expanduser(site_conf.root_dir)): info(f'Starting sync for site \"{site}\".') info(\"Listing local file tree...\") local_filetree", "file tree...\") local_filetree = local.filetree(\".\") local_filetree = IgnoreFiles(site_conf).filter(local_filetree) info( f\"Local file tree has", "action(s).\") else: info(f\"Would apply {applied_actions} action(s).\") if site_conf.remove_empty_dirs: info(\"Searching for empty directories...\") remote_filetree", "{local_filetree.number_of_files()} file(s)\" f\" and {local_filetree.number_of_directories()} dir(s).\" ) info(\"Fetching remote file tree...\") remote_filetree =", "local from .config import load_config_file from .ignore_files import IgnoreFiles from .log import (debug,", ". 
import local from .config import load_config_file from .ignore_files import IgnoreFiles from .log", "with \"--help\" for more info.') exit(1) for site, site_conf in conf.items(): client =", "{local_filetree.number_of_directories()} dir(s).\" ) info(\"Fetching remote file tree...\") remote_filetree = client.list() info( f\"Remote file", "empty_directories = remote_filetree.list_empty_directories() info(f\"Found {len(empty_directories)} empty dir(s).\") for empty_dir in sorted(empty_directories, reverse=True): info(f'Deleting", "= remote_filetree.list_empty_directories() info(f\"Found {len(empty_directories)} empty dir(s).\") for empty_dir in sorted(empty_directories, reverse=True): info(f'Deleting remote", "local.filetree(\".\") local_filetree = IgnoreFiles(site_conf).filter(local_filetree) info( f\"Local file tree has {local_filetree.number_of_files()} file(s)\" f\" and", "local_filetree = IgnoreFiles(site_conf).filter(local_filetree) info( f\"Local file tree has {local_filetree.number_of_files()} file(s)\" f\" and {local_filetree.number_of_directories()}", "client.list() empty_directories = remote_filetree.list_empty_directories() info(f\"Found {len(empty_directories)} empty dir(s).\") for empty_dir in sorted(empty_directories, reverse=True):", "for site, site_conf in conf.items(): client = Neocities(site_conf.api_key) with Pushd(os.path.expanduser(site_conf.root_dir)): info(f'Starting sync for", "increase_verbosity, info) from .neocities import Neocities from .sync_actions import DeleteRemote, DoNothing, UpdateRemote, sync_actions", "neocities.org sites.\"\"\" import os import sys from . import cmdline from . import", "dir(s).\" ) info(\"Fetching remote file tree...\") remote_filetree = client.list() info( f\"Remote file tree", "import (debug, decrease_verbosity, error, fatal, increase_verbosity, info) from .neocities import Neocities from .sync_actions", "cmdline from . 
import local from .config import load_config_file from .ignore_files import IgnoreFiles", "except FileNotFoundError: fatal(f'Config file \"{cmdline_opts.config_file}\" not found. Run again with \"--help\" for more", "from .log import (debug, decrease_verbosity, error, fatal, increase_verbosity, info) from .neocities import Neocities", "cmdline.parse(sys.argv[1:]) if cmdline_opts.quietness > 0: for _ in range(cmdline_opts.quietness): decrease_verbosity() elif cmdline_opts.quietness <", "cmdline_opts.dry_run: client.delete(action.path) applied_actions += 1 elif isinstance(action, DoNothing): debug(f'Skipping \"{action.path}\": {action.reason}.') else: raise", "tree has {local_filetree.number_of_files()} file(s)\" f\" and {local_filetree.number_of_directories()} dir(s).\" ) info(\"Fetching remote file tree...\")", "isinstance(action, DoNothing): debug(f'Skipping \"{action.path}\": {action.reason}.') else: raise RuntimeError(f\"Unknown action {action.__class__.__name__}.\") except Exception as", "main(): \"\"\"Program entry-point.\"\"\" cmdline_opts = cmdline.parse(sys.argv[1:]) if cmdline_opts.quietness > 0: for _ in", "dir(s).\") for empty_dir in sorted(empty_directories, reverse=True): info(f'Deleting remote empty directory \"{empty_dir}\"') if not", "\"--help\" for more info.') exit(1) for site, site_conf in conf.items(): client = Neocities(site_conf.api_key)", "isinstance(action, DeleteRemote): info(f'Deleting remote file \"{action.path}\": {action.reason}.') if not cmdline_opts.dry_run: client.delete(action.path) applied_actions +=", "UpdateRemote): info(f'Updating remote file \"{action.path}\": {action.reason}.') if not cmdline_opts.dry_run: client.upload(action.path) applied_actions += 1", "(debug, decrease_verbosity, error, fatal, increase_verbosity, info) from .neocities import Neocities from .sync_actions import", "{action.__class__.__name__}.\") except Exception as e: # noqa: B902 error(f\"Error while syncing: {e}\") exit(1)", "_ in 
range(-cmdline_opts.quietness): increase_verbosity() try: conf = load_config_file(cmdline_opts.config_file) except FileNotFoundError: fatal(f'Config file \"{cmdline_opts.config_file}\"", "import IgnoreFiles from .log import (debug, decrease_verbosity, error, fatal, increase_verbosity, info) from .neocities", "file \"{action.path}\": {action.reason}.') if not cmdline_opts.dry_run: client.upload(action.path) applied_actions += 1 elif isinstance(action, DeleteRemote):", "info(f'Deleting remote empty directory \"{empty_dir}\"') if not cmdline_opts.dry_run: client.delete(empty_dir) info(f'Finished sync for site", "{action.reason}.') if not cmdline_opts.dry_run: client.upload(action.path) applied_actions += 1 elif isinstance(action, DeleteRemote): info(f'Deleting remote", "elif isinstance(action, DeleteRemote): info(f'Deleting remote file \"{action.path}\": {action.reason}.') if not cmdline_opts.dry_run: client.delete(action.path) applied_actions", "load_config_file(cmdline_opts.config_file) except FileNotFoundError: fatal(f'Config file \"{cmdline_opts.config_file}\" not found. Run again with \"--help\" for", "import Pushd def main(): \"\"\"Program entry-point.\"\"\" cmdline_opts = cmdline.parse(sys.argv[1:]) if cmdline_opts.quietness > 0:", "not found. Run again with \"--help\" for more info.') exit(1) for site, site_conf", ". import cmdline from . import local from .config import load_config_file from .ignore_files", "site, site_conf in conf.items(): client = Neocities(site_conf.api_key) with Pushd(os.path.expanduser(site_conf.root_dir)): info(f'Starting sync for site" ]
[ "if isinstance(use_instead, Type) # type: ignore else use_instead ) warnings.warn( f\"Please use {new_class_name}", "): new_class_name = ( use_instead.__name__ # type: ignore if isinstance(use_instead, Type) # type:", "{new_class_name} instead, \" f\"{deprecated_name} will be removed in happyly v{removing_in_version}.\", DeprecationWarning, stacklevel=stacklevel, )", "f\"Please use {new_class_name} instead, \" f\"{deprecated_name} will be removed in happyly v{removing_in_version}.\", DeprecationWarning,", "use_instead.__name__ # type: ignore if isinstance(use_instead, Type) # type: ignore else use_instead )", "new_class_name = ( use_instead.__name__ # type: ignore if isinstance(use_instead, Type) # type: ignore", "from typing import Type, Union def will_be_removed( deprecated_name: str, use_instead: Union[str, Type], removing_in_version:", "else use_instead ) warnings.warn( f\"Please use {new_class_name} instead, \" f\"{deprecated_name} will be removed", "ignore else use_instead ) warnings.warn( f\"Please use {new_class_name} instead, \" f\"{deprecated_name} will be", "Union[str, Type], removing_in_version: str, stacklevel=2, ): new_class_name = ( use_instead.__name__ # type: ignore", "ignore if isinstance(use_instead, Type) # type: ignore else use_instead ) warnings.warn( f\"Please use", "removing_in_version: str, stacklevel=2, ): new_class_name = ( use_instead.__name__ # type: ignore if isinstance(use_instead,", "# type: ignore if isinstance(use_instead, Type) # type: ignore else use_instead ) warnings.warn(", "Type) # type: ignore else use_instead ) warnings.warn( f\"Please use {new_class_name} instead, \"", "Union def will_be_removed( deprecated_name: str, use_instead: Union[str, Type], removing_in_version: str, stacklevel=2, ): new_class_name", "use_instead: Union[str, Type], removing_in_version: str, stacklevel=2, ): new_class_name = ( use_instead.__name__ # type:", "warnings.warn( f\"Please use {new_class_name} instead, \" f\"{deprecated_name} will be 
removed in happyly v{removing_in_version}.\",", "import warnings from typing import Type, Union def will_be_removed( deprecated_name: str, use_instead: Union[str,", "def will_be_removed( deprecated_name: str, use_instead: Union[str, Type], removing_in_version: str, stacklevel=2, ): new_class_name =", "str, stacklevel=2, ): new_class_name = ( use_instead.__name__ # type: ignore if isinstance(use_instead, Type)", "type: ignore else use_instead ) warnings.warn( f\"Please use {new_class_name} instead, \" f\"{deprecated_name} will", "will_be_removed( deprecated_name: str, use_instead: Union[str, Type], removing_in_version: str, stacklevel=2, ): new_class_name = (", "# type: ignore else use_instead ) warnings.warn( f\"Please use {new_class_name} instead, \" f\"{deprecated_name}", ") warnings.warn( f\"Please use {new_class_name} instead, \" f\"{deprecated_name} will be removed in happyly", "use_instead ) warnings.warn( f\"Please use {new_class_name} instead, \" f\"{deprecated_name} will be removed in", "isinstance(use_instead, Type) # type: ignore else use_instead ) warnings.warn( f\"Please use {new_class_name} instead,", "type: ignore if isinstance(use_instead, Type) # type: ignore else use_instead ) warnings.warn( f\"Please", "stacklevel=2, ): new_class_name = ( use_instead.__name__ # type: ignore if isinstance(use_instead, Type) #", "( use_instead.__name__ # type: ignore if isinstance(use_instead, Type) # type: ignore else use_instead", "typing import Type, Union def will_be_removed( deprecated_name: str, use_instead: Union[str, Type], removing_in_version: str,", "Type], removing_in_version: str, stacklevel=2, ): new_class_name = ( use_instead.__name__ # type: ignore if", "Type, Union def will_be_removed( deprecated_name: str, use_instead: Union[str, Type], removing_in_version: str, stacklevel=2, ):", "import Type, Union def will_be_removed( deprecated_name: str, use_instead: Union[str, Type], removing_in_version: str, stacklevel=2,", "= ( use_instead.__name__ # type: 
ignore if isinstance(use_instead, Type) # type: ignore else", "use {new_class_name} instead, \" f\"{deprecated_name} will be removed in happyly v{removing_in_version}.\", DeprecationWarning, stacklevel=stacklevel,", "str, use_instead: Union[str, Type], removing_in_version: str, stacklevel=2, ): new_class_name = ( use_instead.__name__ #", "deprecated_name: str, use_instead: Union[str, Type], removing_in_version: str, stacklevel=2, ): new_class_name = ( use_instead.__name__", "warnings from typing import Type, Union def will_be_removed( deprecated_name: str, use_instead: Union[str, Type]," ]
[ "= input() template = r'never gonna let you down...' match = re.match(template, string,", "string = input() template = r'never gonna let you down...' match = re.match(template,", "import re string = input() template = r'never gonna let you down...' match", "re string = input() template = r'never gonna let you down...' match =", "input() template = r'never gonna let you down...' match = re.match(template, string, flags=re.IGNORECASE)" ]
[ "= metadata['purpose_en'] except KeyError: webpage_object.purpose_en = 'no info provided' try: webpage_object.git_url = metadata['github']", "KeyError: webpage_object.version = 'no info provided' try: webpage_object.last_commit = metadata['last_commit'] except KeyError: webpage_object.last_commit", "KeyError: webpage_object.purpose_en = 'no info provided' try: webpage_object.git_url = metadata['github'] except KeyError: webpage_object.git_url", "'no info provided' try: webpage_object.description = metadata['description'] except KeyError: webpage_object.description = 'no info", "metadata['base_tech'] except KeyError: webpage_object.base_tech = 'no info provided' try: webpage_object.framework = metadata['framework'] except", "webpage_object.purpose_en = 'no info provided' try: webpage_object.git_url = metadata['github'] except KeyError: webpage_object.git_url =", "instance of the class appreg.models.WebApp :param metadata: A dictionary providing string for the", "webpage_object.title = metadata['title'] except KeyError: webpage_object.title = 'no info provided' try: webpage_object.subtitle =", "= 'no info provided' try: webpage_object.app_type = metadata['app_type'] except KeyError: webpage_object.app_type = 'no", "the class appreg.models.WebApp. 
\"\"\" try: webpage_object.title = metadata['title'] except KeyError: webpage_object.title = 'no", "KeyError: webpage_object.title = 'no info provided' try: webpage_object.subtitle = metadata['subtitle'] except KeyError: webpage_object.subtitle", "webpage_object.subtitle = metadata['subtitle'] except KeyError: webpage_object.subtitle = 'no info provided' try: webpage_object.author =", "= metadata['version'] except KeyError: webpage_object.version = 'no info provided' try: webpage_object.last_commit = metadata['last_commit']", "'no info provided' try: webpage_object.author = metadata['author'] except KeyError: webpage_object.author = 'no info", "= 'no info provided' try: webpage_object.base_tech = metadata['base_tech'] except KeyError: webpage_object.base_tech = 'no", "webpage_object.git_url = 'no info provided' try: webpage_object.app_type = metadata['app_type'] except KeyError: webpage_object.app_type =", "metadata['subtitle'] except KeyError: webpage_object.subtitle = 'no info provided' try: webpage_object.author = metadata['author'] except", "metadata['title_img'] except KeyError: webpage_object.title_img = settings.DEFAULT_TITLE_IMG try: webpage_object.project_logo = metadata['project_logo'] except KeyError: webpage_object.project_logo", "'no info provided' try: webpage_object.framework = metadata['framework'] except KeyError: webpage_object.framework = 'no info", "webpage_object.last_commit = metadata['last_commit'] except KeyError: webpage_object.last_commit = 'no info provided' try: webpage_object.title_img =", "the parsed data :param webpage_object: An instance of the class appreg.models.WebApp :param metadata:", "= metadata['app_type'] except KeyError: webpage_object.app_type = 'no info provided' try: webpage_object.base_tech = metadata['base_tech']", "= 'no info provided' try: webpage_object.git_url = metadata['github'] except KeyError: webpage_object.git_url = 'no", "try: webpage_object.purpose_en = metadata['purpose_en'] except KeyError: 
webpage_object.purpose_en = 'no info provided' try: webpage_object.git_url", "provided' try: webpage_object.subtitle = metadata['subtitle'] except KeyError: webpage_object.subtitle = 'no info provided' try:", "except KeyError: webpage_object.purpose_en = 'no info provided' try: webpage_object.git_url = metadata['github'] except KeyError:", "KeyError: webpage_object.description = 'no info provided' try: webpage_object.purpose_en = metadata['purpose_en'] except KeyError: webpage_object.purpose_en", "\"subtitle\", \"author\", \"description\", \"purpose_en\", \"gitbub\", \"app_type\", \"base_tech\",\\ \"framework\"] :return: An instance of the", "provided' try: webpage_object.version = metadata['version'] except KeyError: webpage_object.version = 'no info provided' try:", "populate_webapp(webpage_object, metadata): \"\"\" parses a metadata_dict and populates a webpage object with the", "try: webpage_object.version = metadata['version'] except KeyError: webpage_object.version = 'no info provided' try: webpage_object.last_commit", ":return: An instance of the class appreg.models.WebApp. \"\"\" try: webpage_object.title = metadata['title'] except", "of the class appreg.models.WebApp. 
\"\"\" try: webpage_object.title = metadata['title'] except KeyError: webpage_object.title =", "providing string for the following keys [\"title\",\\ \"subtitle\", \"author\", \"description\", \"purpose_en\", \"gitbub\", \"app_type\",", "KeyError: webpage_object.title_img = settings.DEFAULT_TITLE_IMG try: webpage_object.project_logo = metadata['project_logo'] except KeyError: webpage_object.project_logo = settings.DEFAULT_LOGO", "'no info provided' try: webpage_object.last_commit = metadata['last_commit'] except KeyError: webpage_object.last_commit = 'no info", "except KeyError: webpage_object.description = 'no info provided' try: webpage_object.purpose_en = metadata['purpose_en'] except KeyError:", "from django.conf import settings def populate_webapp(webpage_object, metadata): \"\"\" parses a metadata_dict and populates", "metadata['description'] except KeyError: webpage_object.description = 'no info provided' try: webpage_object.purpose_en = metadata['purpose_en'] except", "= settings.DEFAULT_TITLE_IMG try: webpage_object.project_logo = metadata['project_logo'] except KeyError: webpage_object.project_logo = settings.DEFAULT_LOGO try: webpage_object.public", "try: webpage_object.project_logo = metadata['project_logo'] except KeyError: webpage_object.project_logo = settings.DEFAULT_LOGO try: webpage_object.public = metadata['public']", "\"description\", \"purpose_en\", \"gitbub\", \"app_type\", \"base_tech\",\\ \"framework\"] :return: An instance of the class appreg.models.WebApp.", "webpage_object.version = metadata['version'] except KeyError: webpage_object.version = 'no info provided' try: webpage_object.last_commit =", ":param webpage_object: An instance of the class appreg.models.WebApp :param metadata: A dictionary providing", "info provided' try: webpage_object.framework = metadata['framework'] except KeyError: webpage_object.framework = 'no info provided'", "info provided' try: webpage_object.version = metadata['version'] except KeyError: 
webpage_object.version = 'no info provided'", "following keys [\"title\",\\ \"subtitle\", \"author\", \"description\", \"purpose_en\", \"gitbub\", \"app_type\", \"base_tech\",\\ \"framework\"] :return: An", "webpage_object.project_logo = settings.DEFAULT_LOGO try: webpage_object.public = metadata['public'] except KeyError: webpage_object.public = 'restricted' return", "webpage_object.project_logo = metadata['project_logo'] except KeyError: webpage_object.project_logo = settings.DEFAULT_LOGO try: webpage_object.public = metadata['public'] except", "provided' try: webpage_object.last_commit = metadata['last_commit'] except KeyError: webpage_object.last_commit = 'no info provided' try:", "except KeyError: webpage_object.app_type = 'no info provided' try: webpage_object.base_tech = metadata['base_tech'] except KeyError:", "try: webpage_object.framework = metadata['framework'] except KeyError: webpage_object.framework = 'no info provided' try: webpage_object.version", "metadata['purpose_en'] except KeyError: webpage_object.purpose_en = 'no info provided' try: webpage_object.git_url = metadata['github'] except", "info provided' try: webpage_object.description = metadata['description'] except KeyError: webpage_object.description = 'no info provided'", "info provided' try: webpage_object.app_type = metadata['app_type'] except KeyError: webpage_object.app_type = 'no info provided'", "'no info provided' try: webpage_object.version = metadata['version'] except KeyError: webpage_object.version = 'no info", "webpage_object.base_tech = metadata['base_tech'] except KeyError: webpage_object.base_tech = 'no info provided' try: webpage_object.framework =", "= 'no info provided' try: webpage_object.last_commit = metadata['last_commit'] except KeyError: webpage_object.last_commit = 'no", "webpage_object.description = metadata['description'] except KeyError: webpage_object.description = 'no info provided' try: webpage_object.purpose_en =", "populates a webpage object with the parsed 
data :param webpage_object: An instance of", "\"gitbub\", \"app_type\", \"base_tech\",\\ \"framework\"] :return: An instance of the class appreg.models.WebApp. \"\"\" try:", "class appreg.models.WebApp :param metadata: A dictionary providing string for the following keys [\"title\",\\", "'no info provided' try: webpage_object.subtitle = metadata['subtitle'] except KeyError: webpage_object.subtitle = 'no info", "'no info provided' try: webpage_object.title_img = metadata['title_img'] except KeyError: webpage_object.title_img = settings.DEFAULT_TITLE_IMG try:", "webpage_object.app_type = metadata['app_type'] except KeyError: webpage_object.app_type = 'no info provided' try: webpage_object.base_tech =", "of the class appreg.models.WebApp :param metadata: A dictionary providing string for the following", "data :param webpage_object: An instance of the class appreg.models.WebApp :param metadata: A dictionary", "appreg.models.WebApp :param metadata: A dictionary providing string for the following keys [\"title\",\\ \"subtitle\",", "= settings.DEFAULT_LOGO try: webpage_object.public = metadata['public'] except KeyError: webpage_object.public = 'restricted' return webpage_object", "try: webpage_object.title_img = metadata['title_img'] except KeyError: webpage_object.title_img = settings.DEFAULT_TITLE_IMG try: webpage_object.project_logo = metadata['project_logo']", "info provided' try: webpage_object.subtitle = metadata['subtitle'] except KeyError: webpage_object.subtitle = 'no info provided'", "metadata['project_logo'] except KeyError: webpage_object.project_logo = settings.DEFAULT_LOGO try: webpage_object.public = metadata['public'] except KeyError: webpage_object.public", "\"author\", \"description\", \"purpose_en\", \"gitbub\", \"app_type\", \"base_tech\",\\ \"framework\"] :return: An instance of the class", "webpage_object.last_commit = 'no info provided' try: webpage_object.title_img = metadata['title_img'] except KeyError: webpage_object.title_img =", "parsed data 
:param webpage_object: An instance of the class appreg.models.WebApp :param metadata: A", "KeyError: webpage_object.project_logo = settings.DEFAULT_LOGO try: webpage_object.public = metadata['public'] except KeyError: webpage_object.public = 'restricted'", "try: webpage_object.subtitle = metadata['subtitle'] except KeyError: webpage_object.subtitle = 'no info provided' try: webpage_object.author", "KeyError: webpage_object.last_commit = 'no info provided' try: webpage_object.title_img = metadata['title_img'] except KeyError: webpage_object.title_img", ":param metadata: A dictionary providing string for the following keys [\"title\",\\ \"subtitle\", \"author\",", "metadata_dict and populates a webpage object with the parsed data :param webpage_object: An", "= 'no info provided' try: webpage_object.framework = metadata['framework'] except KeyError: webpage_object.framework = 'no", "info provided' try: webpage_object.last_commit = metadata['last_commit'] except KeyError: webpage_object.last_commit = 'no info provided'", "= 'no info provided' try: webpage_object.title_img = metadata['title_img'] except KeyError: webpage_object.title_img = settings.DEFAULT_TITLE_IMG", "info provided' try: webpage_object.base_tech = metadata['base_tech'] except KeyError: webpage_object.base_tech = 'no info provided'", "provided' try: webpage_object.description = metadata['description'] except KeyError: webpage_object.description = 'no info provided' try:", "metadata): \"\"\" parses a metadata_dict and populates a webpage object with the parsed", "= 'no info provided' try: webpage_object.purpose_en = metadata['purpose_en'] except KeyError: webpage_object.purpose_en = 'no", "except KeyError: webpage_object.framework = 'no info provided' try: webpage_object.version = metadata['version'] except KeyError:", "= metadata['description'] except KeyError: webpage_object.description = 'no info provided' try: webpage_object.purpose_en = metadata['purpose_en']", "except KeyError: 
webpage_object.version = 'no info provided' try: webpage_object.last_commit = metadata['last_commit'] except KeyError:", "def populate_webapp(webpage_object, metadata): \"\"\" parses a metadata_dict and populates a webpage object with", "\"purpose_en\", \"gitbub\", \"app_type\", \"base_tech\",\\ \"framework\"] :return: An instance of the class appreg.models.WebApp. \"\"\"", "info provided' try: webpage_object.git_url = metadata['github'] except KeyError: webpage_object.git_url = 'no info provided'", "the class appreg.models.WebApp :param metadata: A dictionary providing string for the following keys", "keys [\"title\",\\ \"subtitle\", \"author\", \"description\", \"purpose_en\", \"gitbub\", \"app_type\", \"base_tech\",\\ \"framework\"] :return: An instance", "= 'no info provided' try: webpage_object.version = metadata['version'] except KeyError: webpage_object.version = 'no", "'no info provided' try: webpage_object.git_url = metadata['github'] except KeyError: webpage_object.git_url = 'no info", "metadata['version'] except KeyError: webpage_object.version = 'no info provided' try: webpage_object.last_commit = metadata['last_commit'] except", "django.conf import settings def populate_webapp(webpage_object, metadata): \"\"\" parses a metadata_dict and populates a", "import settings def populate_webapp(webpage_object, metadata): \"\"\" parses a metadata_dict and populates a webpage", "KeyError: webpage_object.subtitle = 'no info provided' try: webpage_object.author = metadata['author'] except KeyError: webpage_object.author", "info provided' try: webpage_object.purpose_en = metadata['purpose_en'] except KeyError: webpage_object.purpose_en = 'no info provided'", "provided' try: webpage_object.git_url = metadata['github'] except KeyError: webpage_object.git_url = 'no info provided' try:", "try: webpage_object.author = metadata['author'] except KeyError: webpage_object.author = 'no info provided' try: webpage_object.description", "except KeyError: 
webpage_object.title_img = settings.DEFAULT_TITLE_IMG try: webpage_object.project_logo = metadata['project_logo'] except KeyError: webpage_object.project_logo =", "for the following keys [\"title\",\\ \"subtitle\", \"author\", \"description\", \"purpose_en\", \"gitbub\", \"app_type\", \"base_tech\",\\ \"framework\"]", "webpage_object.description = 'no info provided' try: webpage_object.purpose_en = metadata['purpose_en'] except KeyError: webpage_object.purpose_en =", "webpage_object.title = 'no info provided' try: webpage_object.subtitle = metadata['subtitle'] except KeyError: webpage_object.subtitle =", "= metadata['project_logo'] except KeyError: webpage_object.project_logo = settings.DEFAULT_LOGO try: webpage_object.public = metadata['public'] except KeyError:", "= metadata['title_img'] except KeyError: webpage_object.title_img = settings.DEFAULT_TITLE_IMG try: webpage_object.project_logo = metadata['project_logo'] except KeyError:", "webpage_object.author = 'no info provided' try: webpage_object.description = metadata['description'] except KeyError: webpage_object.description =", "\"framework\"] :return: An instance of the class appreg.models.WebApp. 
\"\"\" try: webpage_object.title = metadata['title']", "= metadata['subtitle'] except KeyError: webpage_object.subtitle = 'no info provided' try: webpage_object.author = metadata['author']", "webpage_object.title_img = metadata['title_img'] except KeyError: webpage_object.title_img = settings.DEFAULT_TITLE_IMG try: webpage_object.project_logo = metadata['project_logo'] except", "= metadata['github'] except KeyError: webpage_object.git_url = 'no info provided' try: webpage_object.app_type = metadata['app_type']", "object with the parsed data :param webpage_object: An instance of the class appreg.models.WebApp", "webpage_object: An instance of the class appreg.models.WebApp :param metadata: A dictionary providing string", "\"\"\" parses a metadata_dict and populates a webpage object with the parsed data", "= 'no info provided' try: webpage_object.author = metadata['author'] except KeyError: webpage_object.author = 'no", "instance of the class appreg.models.WebApp. \"\"\" try: webpage_object.title = metadata['title'] except KeyError: webpage_object.title", "\"\"\" try: webpage_object.title = metadata['title'] except KeyError: webpage_object.title = 'no info provided' try:", "= metadata['author'] except KeyError: webpage_object.author = 'no info provided' try: webpage_object.description = metadata['description']", "webpage_object.base_tech = 'no info provided' try: webpage_object.framework = metadata['framework'] except KeyError: webpage_object.framework =", "metadata['framework'] except KeyError: webpage_object.framework = 'no info provided' try: webpage_object.version = metadata['version'] except", "and populates a webpage object with the parsed data :param webpage_object: An instance", "info provided' try: webpage_object.author = metadata['author'] except KeyError: webpage_object.author = 'no info provided'", "try: webpage_object.last_commit = metadata['last_commit'] except KeyError: webpage_object.last_commit = 'no info provided' try: webpage_object.title_img", 
"webpage_object.git_url = metadata['github'] except KeyError: webpage_object.git_url = 'no info provided' try: webpage_object.app_type =", "'no info provided' try: webpage_object.base_tech = metadata['base_tech'] except KeyError: webpage_object.base_tech = 'no info", "[\"title\",\\ \"subtitle\", \"author\", \"description\", \"purpose_en\", \"gitbub\", \"app_type\", \"base_tech\",\\ \"framework\"] :return: An instance of", "metadata['title'] except KeyError: webpage_object.title = 'no info provided' try: webpage_object.subtitle = metadata['subtitle'] except", "provided' try: webpage_object.framework = metadata['framework'] except KeyError: webpage_object.framework = 'no info provided' try:", "= metadata['title'] except KeyError: webpage_object.title = 'no info provided' try: webpage_object.subtitle = metadata['subtitle']", "metadata['last_commit'] except KeyError: webpage_object.last_commit = 'no info provided' try: webpage_object.title_img = metadata['title_img'] except", "string for the following keys [\"title\",\\ \"subtitle\", \"author\", \"description\", \"purpose_en\", \"gitbub\", \"app_type\", \"base_tech\",\\", "metadata: A dictionary providing string for the following keys [\"title\",\\ \"subtitle\", \"author\", \"description\",", "webpage_object.author = metadata['author'] except KeyError: webpage_object.author = 'no info provided' try: webpage_object.description =", "try: webpage_object.git_url = metadata['github'] except KeyError: webpage_object.git_url = 'no info provided' try: webpage_object.app_type", "try: webpage_object.app_type = metadata['app_type'] except KeyError: webpage_object.app_type = 'no info provided' try: webpage_object.base_tech", "try: webpage_object.title = metadata['title'] except KeyError: webpage_object.title = 'no info provided' try: webpage_object.subtitle", "'no info provided' try: webpage_object.app_type = metadata['app_type'] except KeyError: webpage_object.app_type = 'no info", "webpage_object.purpose_en = 
metadata['purpose_en'] except KeyError: webpage_object.purpose_en = 'no info provided' try: webpage_object.git_url =", "KeyError: webpage_object.git_url = 'no info provided' try: webpage_object.app_type = metadata['app_type'] except KeyError: webpage_object.app_type", "= 'no info provided' try: webpage_object.description = metadata['description'] except KeyError: webpage_object.description = 'no", "webpage_object.app_type = 'no info provided' try: webpage_object.base_tech = metadata['base_tech'] except KeyError: webpage_object.base_tech =", "'no info provided' try: webpage_object.purpose_en = metadata['purpose_en'] except KeyError: webpage_object.purpose_en = 'no info", "a webpage object with the parsed data :param webpage_object: An instance of the", "= metadata['base_tech'] except KeyError: webpage_object.base_tech = 'no info provided' try: webpage_object.framework = metadata['framework']", "a metadata_dict and populates a webpage object with the parsed data :param webpage_object:", "appreg.models.WebApp. 
\"\"\" try: webpage_object.title = metadata['title'] except KeyError: webpage_object.title = 'no info provided'", "except KeyError: webpage_object.git_url = 'no info provided' try: webpage_object.app_type = metadata['app_type'] except KeyError:", "provided' try: webpage_object.purpose_en = metadata['purpose_en'] except KeyError: webpage_object.purpose_en = 'no info provided' try:", "provided' try: webpage_object.app_type = metadata['app_type'] except KeyError: webpage_object.app_type = 'no info provided' try:", "except KeyError: webpage_object.project_logo = settings.DEFAULT_LOGO try: webpage_object.public = metadata['public'] except KeyError: webpage_object.public =", "except KeyError: webpage_object.subtitle = 'no info provided' try: webpage_object.author = metadata['author'] except KeyError:", "webpage_object.subtitle = 'no info provided' try: webpage_object.author = metadata['author'] except KeyError: webpage_object.author =", "A dictionary providing string for the following keys [\"title\",\\ \"subtitle\", \"author\", \"description\", \"purpose_en\",", "An instance of the class appreg.models.WebApp. \"\"\" try: webpage_object.title = metadata['title'] except KeyError:", "webpage_object.framework = metadata['framework'] except KeyError: webpage_object.framework = 'no info provided' try: webpage_object.version =", "class appreg.models.WebApp. 
\"\"\" try: webpage_object.title = metadata['title'] except KeyError: webpage_object.title = 'no info", "= 'no info provided' try: webpage_object.subtitle = metadata['subtitle'] except KeyError: webpage_object.subtitle = 'no", "KeyError: webpage_object.framework = 'no info provided' try: webpage_object.version = metadata['version'] except KeyError: webpage_object.version", "settings def populate_webapp(webpage_object, metadata): \"\"\" parses a metadata_dict and populates a webpage object", "metadata['github'] except KeyError: webpage_object.git_url = 'no info provided' try: webpage_object.app_type = metadata['app_type'] except", "try: webpage_object.description = metadata['description'] except KeyError: webpage_object.description = 'no info provided' try: webpage_object.purpose_en", "try: webpage_object.base_tech = metadata['base_tech'] except KeyError: webpage_object.base_tech = 'no info provided' try: webpage_object.framework", "with the parsed data :param webpage_object: An instance of the class appreg.models.WebApp :param", "KeyError: webpage_object.base_tech = 'no info provided' try: webpage_object.framework = metadata['framework'] except KeyError: webpage_object.framework", "info provided' try: webpage_object.title_img = metadata['title_img'] except KeyError: webpage_object.title_img = settings.DEFAULT_TITLE_IMG try: webpage_object.project_logo", "webpage_object.title_img = settings.DEFAULT_TITLE_IMG try: webpage_object.project_logo = metadata['project_logo'] except KeyError: webpage_object.project_logo = settings.DEFAULT_LOGO try:", "provided' try: webpage_object.author = metadata['author'] except KeyError: webpage_object.author = 'no info provided' try:", "KeyError: webpage_object.author = 'no info provided' try: webpage_object.description = metadata['description'] except KeyError: webpage_object.description", "provided' try: webpage_object.title_img = metadata['title_img'] except KeyError: webpage_object.title_img = settings.DEFAULT_TITLE_IMG try: 
webpage_object.project_logo =", "webpage object with the parsed data :param webpage_object: An instance of the class", "the following keys [\"title\",\\ \"subtitle\", \"author\", \"description\", \"purpose_en\", \"gitbub\", \"app_type\", \"base_tech\",\\ \"framework\"] :return:", "metadata['author'] except KeyError: webpage_object.author = 'no info provided' try: webpage_object.description = metadata['description'] except", "except KeyError: webpage_object.author = 'no info provided' try: webpage_object.description = metadata['description'] except KeyError:", "parses a metadata_dict and populates a webpage object with the parsed data :param", "<reponame>acdh-oeaw/dar from django.conf import settings def populate_webapp(webpage_object, metadata): \"\"\" parses a metadata_dict and", "except KeyError: webpage_object.title = 'no info provided' try: webpage_object.subtitle = metadata['subtitle'] except KeyError:", "= metadata['framework'] except KeyError: webpage_object.framework = 'no info provided' try: webpage_object.version = metadata['version']", "dictionary providing string for the following keys [\"title\",\\ \"subtitle\", \"author\", \"description\", \"purpose_en\", \"gitbub\",", "\"base_tech\",\\ \"framework\"] :return: An instance of the class appreg.models.WebApp. 
\"\"\" try: webpage_object.title =", "except KeyError: webpage_object.base_tech = 'no info provided' try: webpage_object.framework = metadata['framework'] except KeyError:", "settings.DEFAULT_TITLE_IMG try: webpage_object.project_logo = metadata['project_logo'] except KeyError: webpage_object.project_logo = settings.DEFAULT_LOGO try: webpage_object.public =", "except KeyError: webpage_object.last_commit = 'no info provided' try: webpage_object.title_img = metadata['title_img'] except KeyError:", "KeyError: webpage_object.app_type = 'no info provided' try: webpage_object.base_tech = metadata['base_tech'] except KeyError: webpage_object.base_tech", "provided' try: webpage_object.base_tech = metadata['base_tech'] except KeyError: webpage_object.base_tech = 'no info provided' try:", "webpage_object.framework = 'no info provided' try: webpage_object.version = metadata['version'] except KeyError: webpage_object.version =", "An instance of the class appreg.models.WebApp :param metadata: A dictionary providing string for", "\"app_type\", \"base_tech\",\\ \"framework\"] :return: An instance of the class appreg.models.WebApp. \"\"\" try: webpage_object.title", "metadata['app_type'] except KeyError: webpage_object.app_type = 'no info provided' try: webpage_object.base_tech = metadata['base_tech'] except", "webpage_object.version = 'no info provided' try: webpage_object.last_commit = metadata['last_commit'] except KeyError: webpage_object.last_commit =", "= metadata['last_commit'] except KeyError: webpage_object.last_commit = 'no info provided' try: webpage_object.title_img = metadata['title_img']" ]
[ "*255.) cv2.imwrite('./test_result/'+str(i)+'_real.png', (real_img[0]+1.0)/2.0 *255.) cv2.imwrite('./test_result/'+str(i)+'_gen.png', (G_out[0]+1.0)/2.0*255.) logging.info(\"Deblur Image is saved (%d/%d) \", i,", "= model.run_optim_G(feed_dict=feed_dict) logging.info('%d epoch, %d batch, Generator Loss: %f, add loss: %f, perceptual_loss:", "started\") dataset = loader.read_data_path(args.data_path_test, name=args.data_name) for i, data in enumerate(dataset): if not os.path.exists('./test_result'):", "a batch data pair: dataset = loader.read_data_path(args.data_path_train, name=args.data_name) num_dataset = len(dataset) num_batch =", "model.input['real_img']: real_img,\\ model.learning_rate: learning_rate} loss_G, adv_loss, perceptual_loss = model.run_optim_G(feed_dict=feed_dict) logging.info('%d epoch, %d batch,", "pair: dataset = loader.read_data_path(args.data_path_train, name=args.data_name) num_dataset = len(dataset) num_batch = num_dataset/args.batch_num sess =", "%f, gp_loss: %f', iter, i, loss_D, loss_disc, loss_gp) if (iter+1) % 50 ==", "(%d/%d) \", i, len(dataset)) logging.info(\"[*] test done\") if __name__ == '__main__': import argparse", "from models.cgan_model import cgan from models.ops import * os.system('http_proxy_on') def linear_decay(initial=0.0001, step=0, start_step=150,", "for _ in range(args.iter_disc): loss_D, loss_disc, loss_gp = model.run_optim_D(feed_dict=feed_dict, with_image=args.tf_image_monitor) logging.info('%d epoch, %d", "len(dataset)) logging.info(\"[*] test done\") if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(description='')", "default='/data/private/data/GOPRO_Large/train/') parser.add_argument('--data_path_test', type=str, default='/data/private/data/GOPRO_Large/test/') parser.add_argument('--checkpoint_dir', type=str, default='./checkpoints/') parser.add_argument('--model_name', type=str, default='DeblurGAN.model') parser.add_argument('--summary_dir', type=str, default='./summaries/')", 
"type=int, default=1) parser.add_argument('--iter_disc', type=int, default=5) parser.add_argument('--batch_num', type=int, default=1) parser.add_argument('--epoch', type=int, default=300) parser.add_argument('--data_path_train', type=str,", "os.mkdir('./test_result') blur_img, real_img = loader.read_image_pair(data, resize_or_crop = args.resize_or_crop, image_size=(args.img_h, args.img_w)) feed_dict_G = {model.input['blur_img']:", "numpy as np import cv2 import data.data_loader as loader from models.cgan_model import cgan", "loader.read_data_path(args.data_path_train, name=args.data_name) num_dataset = len(dataset) num_batch = num_dataset/args.batch_num sess = tf.Session() model =", "Training Discriminator for _ in range(args.iter_disc): loss_D, loss_disc, loss_gp = model.run_optim_D(feed_dict=feed_dict, with_image=args.tf_image_monitor) logging.info('%d", "default=5) parser.add_argument('--batch_num', type=int, default=1) parser.add_argument('--epoch', type=int, default=300) parser.add_argument('--data_path_train', type=str, default='/data/private/data/GOPRO_Large/train/') parser.add_argument('--data_path_test', type=str, default='/data/private/data/GOPRO_Large/test/')", "adv_loss, perceptual_loss = model.run_optim_G(feed_dict=feed_dict) logging.info('%d epoch, %d batch, Generator Loss: %f, add loss:", "== 0 or iter == (args.epoch-1): model.save_weights(args.checkpoint_dir, model.global_step) logging.info(\"[!] test started\") dataset =", "(iter+1) % 50 == 0 or iter == (args.epoch-1): model.save_weights(args.checkpoint_dir, model.global_step) logging.info(\"[!] 
test", "step_decay = (initial-0.0)/decay_period update_step = max(0, step-start_step) current_value = max(0, initial - (update_step)*step_decay)", "loss_D, loss_disc, loss_gp = model.run_optim_D(feed_dict=feed_dict, with_image=args.tf_image_monitor) logging.info('%d epoch, %d batch, Discriminator Loss: %f,", "enumerate(dataset): blur_img, real_img = loader.read_image_pair(data, resize_or_crop = args.resize_or_crop, image_size=(args.img_h, args.img_w)) feed_dict = {model.input['blur_img']:", "= (initial-0.0)/decay_period update_step = max(0, step-start_step) current_value = max(0, initial - (update_step)*step_decay) return", "done\") if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(description='') parser.add_argument('--iter_gen', type=int, default=1)", "update_step = max(0, step-start_step) current_value = max(0, initial - (update_step)*step_decay) return current_value def", "= max(0, initial - (update_step)*step_decay) return current_value def train(args): #assume there is a", "blur_img} G_out = model.G_output(feed_dict=feed_dict_G) cv2.imwrite('./test_result/'+str(i)+'_blur.png', (blur_img[0]+1.0)/2.0 *255.) cv2.imwrite('./test_result/'+str(i)+'_real.png', (real_img[0]+1.0)/2.0 *255.) cv2.imwrite('./test_result/'+str(i)+'_gen.png', (G_out[0]+1.0)/2.0*255.) 
logging.info(\"Deblur", "parser.add_argument('--iter_gen', type=int, default=1) parser.add_argument('--iter_disc', type=int, default=5) parser.add_argument('--batch_num', type=int, default=1) parser.add_argument('--epoch', type=int, default=300) parser.add_argument('--data_path_train',", "default=256) parser.add_argument('--img_c', type=int, default=3) parser.add_argument('--debug', action='store_true') args = parser.parse_args() log_format = '[%(asctime)s %(levelname)s]", "linear_decay(initial=0.0001, step=0, start_step=150, end_step=300): ''' return decayed learning rate It becomes 0 at", "or iter == (args.epoch-1): model.save_weights(args.checkpoint_dir, model.global_step) logging.info(\"[!] test started\") dataset = loader.read_data_path(args.data_path_test, name=args.data_name)", "%(levelname)s] %(message)s' level = logging.DEBUG if args.debug else logging.INFO logging.basicConfig(level=level, format=log_format, stream=sys.stderr) logging.getLogger(\"DeblurGAN_TRAIN.*\").setLevel(level)", "= '[%(asctime)s %(levelname)s] %(message)s' level = logging.DEBUG if args.debug else logging.INFO logging.basicConfig(level=level, format=log_format,", "return decayed learning rate It becomes 0 at end_step ''' decay_period = end_step", "default='GOPRO') parser.add_argument('--tf_image_monitor', type=bool, default=False) parser.add_argument('--resize_or_crop', type=str, default='resize') parser.add_argument('--img_h', type=int, default=256) parser.add_argument('--img_w', type=int, default=256)", "real_img = loader.read_image_pair(data, resize_or_crop = args.resize_or_crop, image_size=(args.img_h, args.img_w)) feed_dict_G = {model.input['blur_img']: blur_img} G_out", "args.img_w)) feed_dict_G = {model.input['blur_img']: blur_img} G_out = model.G_output(feed_dict=feed_dict_G) cv2.imwrite('./test_result/'+str(i)+'_blur.png', (blur_img[0]+1.0)/2.0 *255.) 
cv2.imwrite('./test_result/'+str(i)+'_real.png', (real_img[0]+1.0)/2.0", "data pair: dataset = loader.read_data_path(args.data_path_train, name=args.data_name) num_dataset = len(dataset) num_batch = num_dataset/args.batch_num sess", "import time import os import sys import logging import json import tensorflow as", "__name__ == '__main__': import argparse parser = argparse.ArgumentParser(description='') parser.add_argument('--iter_gen', type=int, default=1) parser.add_argument('--iter_disc', type=int,", "cgan from models.ops import * os.system('http_proxy_on') def linear_decay(initial=0.0001, step=0, start_step=150, end_step=300): ''' return", "(G_out[0]+1.0)/2.0*255.) logging.info(\"Deblur Image is saved (%d/%d) \", i, len(dataset)) logging.info(\"[*] test done\") if", "args.resize_or_crop, image_size=(args.img_h, args.img_w)) feed_dict_G = {model.input['blur_img']: blur_img} G_out = model.G_output(feed_dict=feed_dict_G) cv2.imwrite('./test_result/'+str(i)+'_blur.png', (blur_img[0]+1.0)/2.0 *255.)", "import cv2 import data.data_loader as loader from models.cgan_model import cgan from models.ops import", "time import os import sys import logging import json import tensorflow as tf", "max(0, step-start_step) current_value = max(0, initial - (update_step)*step_decay) return current_value def train(args): #assume", "= model.G_output(feed_dict=feed_dict_G) cv2.imwrite('./test_result/'+str(i)+'_blur.png', (blur_img[0]+1.0)/2.0 *255.) cv2.imwrite('./test_result/'+str(i)+'_real.png', (real_img[0]+1.0)/2.0 *255.) cv2.imwrite('./test_result/'+str(i)+'_gen.png', (G_out[0]+1.0)/2.0*255.) logging.info(\"Deblur Image is", "model.sess.run(tf.global_variables_initializer()) model.load_weights(args.checkpoint_dir) for iter in range(args.epoch): learning_rate = linear_decay(0.0001, iter) for i, data", "model.save_weights(args.checkpoint_dir, model.global_step) logging.info(\"[!] 
test started\") dataset = loader.read_data_path(args.data_path_test, name=args.data_name) for i, data in", "if not os.path.exists('./test_result'): os.mkdir('./test_result') blur_img, real_img = loader.read_image_pair(data, resize_or_crop = args.resize_or_crop, image_size=(args.img_h, args.img_w))", "\", i, len(dataset)) logging.info(\"[*] test done\") if __name__ == '__main__': import argparse parser", "0 at end_step ''' decay_period = end_step - start_step step_decay = (initial-0.0)/decay_period update_step", "step=0, start_step=150, end_step=300): ''' return decayed learning rate It becomes 0 at end_step", "os.path.exists('./test_result'): os.mkdir('./test_result') blur_img, real_img = loader.read_image_pair(data, resize_or_crop = args.resize_or_crop, image_size=(args.img_h, args.img_w)) feed_dict_G =", "parser.add_argument('--resize_or_crop', type=str, default='resize') parser.add_argument('--img_h', type=int, default=256) parser.add_argument('--img_w', type=int, default=256) parser.add_argument('--img_c', type=int, default=3) parser.add_argument('--debug',", "dataset = loader.read_data_path(args.data_path_test, name=args.data_name) for i, data in enumerate(dataset): if not os.path.exists('./test_result'): os.mkdir('./test_result')", "cgan(sess, args) model.build_model() model.sess.run(tf.global_variables_initializer()) model.load_weights(args.checkpoint_dir) for iter in range(args.epoch): learning_rate = linear_decay(0.0001, iter)", "{model.input['blur_img']: blur_img,\\ model.input['real_img']: real_img,\\ model.learning_rate: learning_rate} loss_G, adv_loss, perceptual_loss = model.run_optim_G(feed_dict=feed_dict) logging.info('%d epoch,", "default=False) parser.add_argument('--resize_or_crop', type=str, default='resize') parser.add_argument('--img_h', type=int, default=256) parser.add_argument('--img_w', type=int, default=256) parser.add_argument('--img_c', type=int, default=3)", "import sys import logging import json import tensorflow as tf import numpy 
as", "feed_dict_G = {model.input['blur_img']: blur_img} G_out = model.G_output(feed_dict=feed_dict_G) cv2.imwrite('./test_result/'+str(i)+'_blur.png', (blur_img[0]+1.0)/2.0 *255.) cv2.imwrite('./test_result/'+str(i)+'_real.png', (real_img[0]+1.0)/2.0 *255.)", "args.resize_or_crop, image_size=(args.img_h, args.img_w)) feed_dict = {model.input['blur_img']: blur_img,\\ model.input['real_img']: real_img,\\ model.learning_rate: learning_rate} loss_G, adv_loss,", "parser.add_argument('--checkpoint_dir', type=str, default='./checkpoints/') parser.add_argument('--model_name', type=str, default='DeblurGAN.model') parser.add_argument('--summary_dir', type=str, default='./summaries/') parser.add_argument('--data_name', type=str, default='GOPRO') parser.add_argument('--tf_image_monitor',", "Loss: %f, loss_disc: %f, gp_loss: %f', iter, i, loss_D, loss_disc, loss_gp) if (iter+1)", "parser.add_argument('--data_path_train', type=str, default='/data/private/data/GOPRO_Large/train/') parser.add_argument('--data_path_test', type=str, default='/data/private/data/GOPRO_Large/test/') parser.add_argument('--checkpoint_dir', type=str, default='./checkpoints/') parser.add_argument('--model_name', type=str, default='DeblurGAN.model') parser.add_argument('--summary_dir',", "parser.add_argument('--img_c', type=int, default=3) parser.add_argument('--debug', action='store_true') args = parser.parse_args() log_format = '[%(asctime)s %(levelname)s] %(message)s'", "step-start_step) current_value = max(0, initial - (update_step)*step_decay) return current_value def train(args): #assume there", "= tf.Session() model = cgan(sess, args) model.build_model() model.sess.run(tf.global_variables_initializer()) model.load_weights(args.checkpoint_dir) for iter in range(args.epoch):", "(initial-0.0)/decay_period update_step = max(0, step-start_step) current_value = max(0, initial - (update_step)*step_decay) return current_value", "sess = tf.Session() model = cgan(sess, args) model.build_model() 
model.sess.run(tf.global_variables_initializer()) model.load_weights(args.checkpoint_dir) for iter in", "logging.info('%d epoch, %d batch, Generator Loss: %f, add loss: %f, perceptual_loss: %f',\\ iter,", "for i, data in enumerate(dataset): blur_img, real_img = loader.read_image_pair(data, resize_or_crop = args.resize_or_crop, image_size=(args.img_h,", "rate It becomes 0 at end_step ''' decay_period = end_step - start_step step_decay", "= model.run_optim_D(feed_dict=feed_dict, with_image=args.tf_image_monitor) logging.info('%d epoch, %d batch, Discriminator Loss: %f, loss_disc: %f, gp_loss:", "'[%(asctime)s %(levelname)s] %(message)s' level = logging.DEBUG if args.debug else logging.INFO logging.basicConfig(level=level, format=log_format, stream=sys.stderr)", "tf import numpy as np import cv2 import data.data_loader as loader from models.cgan_model", "blur_img, real_img = loader.read_image_pair(data, resize_or_crop = args.resize_or_crop, image_size=(args.img_h, args.img_w)) feed_dict_G = {model.input['blur_img']: blur_img}", "perceptual_loss = model.run_optim_G(feed_dict=feed_dict) logging.info('%d epoch, %d batch, Generator Loss: %f, add loss: %f,", "= args.resize_or_crop, image_size=(args.img_h, args.img_w)) feed_dict = {model.input['blur_img']: blur_img,\\ model.input['real_img']: real_img,\\ model.learning_rate: learning_rate} loss_G,", "parser.add_argument('--img_h', type=int, default=256) parser.add_argument('--img_w', type=int, default=256) parser.add_argument('--img_c', type=int, default=3) parser.add_argument('--debug', action='store_true') args =", "args = parser.parse_args() log_format = '[%(asctime)s %(levelname)s] %(message)s' level = logging.DEBUG if args.debug", "if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(description='') parser.add_argument('--iter_gen', type=int, default=1) parser.add_argument('--iter_disc',", "current_value def train(args): #assume there is a batch data pair: dataset = 
loader.read_data_path(args.data_path_train,", "learning_rate = linear_decay(0.0001, iter) for i, data in enumerate(dataset): blur_img, real_img = loader.read_image_pair(data,", "perceptual_loss: %f',\\ iter, i, loss_G, adv_loss, perceptual_loss) #Ready for Training Discriminator for _", "blur_img, real_img = loader.read_image_pair(data, resize_or_crop = args.resize_or_crop, image_size=(args.img_h, args.img_w)) feed_dict = {model.input['blur_img']: blur_img,\\", "args.img_w)) feed_dict = {model.input['blur_img']: blur_img,\\ model.input['real_img']: real_img,\\ model.learning_rate: learning_rate} loss_G, adv_loss, perceptual_loss =", "cv2.imwrite('./test_result/'+str(i)+'_blur.png', (blur_img[0]+1.0)/2.0 *255.) cv2.imwrite('./test_result/'+str(i)+'_real.png', (real_img[0]+1.0)/2.0 *255.) cv2.imwrite('./test_result/'+str(i)+'_gen.png', (G_out[0]+1.0)/2.0*255.) logging.info(\"Deblur Image is saved (%d/%d)", "num_batch = num_dataset/args.batch_num sess = tf.Session() model = cgan(sess, args) model.build_model() model.sess.run(tf.global_variables_initializer()) model.load_weights(args.checkpoint_dir)", "for Training Discriminator for _ in range(args.iter_disc): loss_D, loss_disc, loss_gp = model.run_optim_D(feed_dict=feed_dict, with_image=args.tf_image_monitor)", "type=str, default='resize') parser.add_argument('--img_h', type=int, default=256) parser.add_argument('--img_w', type=int, default=256) parser.add_argument('--img_c', type=int, default=3) parser.add_argument('--debug', action='store_true')", "iter == (args.epoch-1): model.save_weights(args.checkpoint_dir, model.global_step) logging.info(\"[!] 
test started\") dataset = loader.read_data_path(args.data_path_test, name=args.data_name) for", "num_dataset/args.batch_num sess = tf.Session() model = cgan(sess, args) model.build_model() model.sess.run(tf.global_variables_initializer()) model.load_weights(args.checkpoint_dir) for iter", "adv_loss, perceptual_loss) #Ready for Training Discriminator for _ in range(args.iter_disc): loss_D, loss_disc, loss_gp", "is saved (%d/%d) \", i, len(dataset)) logging.info(\"[*] test done\") if __name__ == '__main__':", "model.load_weights(args.checkpoint_dir) for iter in range(args.epoch): learning_rate = linear_decay(0.0001, iter) for i, data in", "parser.add_argument('--iter_disc', type=int, default=5) parser.add_argument('--batch_num', type=int, default=1) parser.add_argument('--epoch', type=int, default=300) parser.add_argument('--data_path_train', type=str, default='/data/private/data/GOPRO_Large/train/') parser.add_argument('--data_path_test',", "loss_disc, loss_gp) if (iter+1) % 50 == 0 or iter == (args.epoch-1): model.save_weights(args.checkpoint_dir,", "(real_img[0]+1.0)/2.0 *255.) cv2.imwrite('./test_result/'+str(i)+'_gen.png', (G_out[0]+1.0)/2.0*255.) 
logging.info(\"Deblur Image is saved (%d/%d) \", i, len(dataset)) logging.info(\"[*]", "os.system('http_proxy_on') def linear_decay(initial=0.0001, step=0, start_step=150, end_step=300): ''' return decayed learning rate It becomes", "import os import sys import logging import json import tensorflow as tf import", "in range(args.epoch): learning_rate = linear_decay(0.0001, iter) for i, data in enumerate(dataset): blur_img, real_img", "import logging import json import tensorflow as tf import numpy as np import", "iter in range(args.epoch): learning_rate = linear_decay(0.0001, iter) for i, data in enumerate(dataset): blur_img,", "real_img,\\ model.learning_rate: learning_rate} loss_G, adv_loss, perceptual_loss = model.run_optim_G(feed_dict=feed_dict) logging.info('%d epoch, %d batch, Generator", "%f', iter, i, loss_D, loss_disc, loss_gp) if (iter+1) % 50 == 0 or", "Image is saved (%d/%d) \", i, len(dataset)) logging.info(\"[*] test done\") if __name__ ==", "(update_step)*step_decay) return current_value def train(args): #assume there is a batch data pair: dataset", "def linear_decay(initial=0.0001, step=0, start_step=150, end_step=300): ''' return decayed learning rate It becomes 0", "parser.add_argument('--model_name', type=str, default='DeblurGAN.model') parser.add_argument('--summary_dir', type=str, default='./summaries/') parser.add_argument('--data_name', type=str, default='GOPRO') parser.add_argument('--tf_image_monitor', type=bool, default=False) parser.add_argument('--resize_or_crop',", "type=int, default=256) parser.add_argument('--img_c', type=int, default=3) parser.add_argument('--debug', action='store_true') args = parser.parse_args() log_format = '[%(asctime)s", "import * os.system('http_proxy_on') def linear_decay(initial=0.0001, step=0, start_step=150, end_step=300): ''' return decayed learning rate", "= loader.read_data_path(args.data_path_train, name=args.data_name) num_dataset = len(dataset) num_batch = num_dataset/args.batch_num sess = tf.Session() 
model", "default='./checkpoints/') parser.add_argument('--model_name', type=str, default='DeblurGAN.model') parser.add_argument('--summary_dir', type=str, default='./summaries/') parser.add_argument('--data_name', type=str, default='GOPRO') parser.add_argument('--tf_image_monitor', type=bool, default=False)", "= {model.input['blur_img']: blur_img,\\ model.input['real_img']: real_img,\\ model.learning_rate: learning_rate} loss_G, adv_loss, perceptual_loss = model.run_optim_G(feed_dict=feed_dict) logging.info('%d", "blur_img,\\ model.input['real_img']: real_img,\\ model.learning_rate: learning_rate} loss_G, adv_loss, perceptual_loss = model.run_optim_G(feed_dict=feed_dict) logging.info('%d epoch, %d", "i, loss_D, loss_disc, loss_gp) if (iter+1) % 50 == 0 or iter ==", "model.run_optim_D(feed_dict=feed_dict, with_image=args.tf_image_monitor) logging.info('%d epoch, %d batch, Discriminator Loss: %f, loss_disc: %f, gp_loss: %f',", "default=300) parser.add_argument('--data_path_train', type=str, default='/data/private/data/GOPRO_Large/train/') parser.add_argument('--data_path_test', type=str, default='/data/private/data/GOPRO_Large/test/') parser.add_argument('--checkpoint_dir', type=str, default='./checkpoints/') parser.add_argument('--model_name', type=str, default='DeblurGAN.model')", "= argparse.ArgumentParser(description='') parser.add_argument('--iter_gen', type=int, default=1) parser.add_argument('--iter_disc', type=int, default=5) parser.add_argument('--batch_num', type=int, default=1) parser.add_argument('--epoch', type=int,", "parser.add_argument('--img_w', type=int, default=256) parser.add_argument('--img_c', type=int, default=3) parser.add_argument('--debug', action='store_true') args = parser.parse_args() log_format =", "== '__main__': import argparse parser = argparse.ArgumentParser(description='') parser.add_argument('--iter_gen', type=int, default=1) parser.add_argument('--iter_disc', type=int, default=5)", "loss_G, adv_loss, perceptual_loss = 
model.run_optim_G(feed_dict=feed_dict) logging.info('%d epoch, %d batch, Generator Loss: %f, add", "type=int, default=256) parser.add_argument('--img_w', type=int, default=256) parser.add_argument('--img_c', type=int, default=3) parser.add_argument('--debug', action='store_true') args = parser.parse_args()", "cv2.imwrite('./test_result/'+str(i)+'_real.png', (real_img[0]+1.0)/2.0 *255.) cv2.imwrite('./test_result/'+str(i)+'_gen.png', (G_out[0]+1.0)/2.0*255.) logging.info(\"Deblur Image is saved (%d/%d) \", i, len(dataset))", "It becomes 0 at end_step ''' decay_period = end_step - start_step step_decay =", "max(0, initial - (update_step)*step_decay) return current_value def train(args): #assume there is a batch", "tf.Session() model = cgan(sess, args) model.build_model() model.sess.run(tf.global_variables_initializer()) model.load_weights(args.checkpoint_dir) for iter in range(args.epoch): learning_rate", "argparse.ArgumentParser(description='') parser.add_argument('--iter_gen', type=int, default=1) parser.add_argument('--iter_disc', type=int, default=5) parser.add_argument('--batch_num', type=int, default=1) parser.add_argument('--epoch', type=int, default=300)", "start_step step_decay = (initial-0.0)/decay_period update_step = max(0, step-start_step) current_value = max(0, initial -", "import argparse parser = argparse.ArgumentParser(description='') parser.add_argument('--iter_gen', type=int, default=1) parser.add_argument('--iter_disc', type=int, default=5) parser.add_argument('--batch_num', type=int,", "resize_or_crop = args.resize_or_crop, image_size=(args.img_h, args.img_w)) feed_dict = {model.input['blur_img']: blur_img,\\ model.input['real_img']: real_img,\\ model.learning_rate: learning_rate}", "%(message)s' level = logging.DEBUG if args.debug else logging.INFO logging.basicConfig(level=level, format=log_format, stream=sys.stderr) logging.getLogger(\"DeblurGAN_TRAIN.*\").setLevel(level) train(args)", "= args.resize_or_crop, image_size=(args.img_h, 
args.img_w)) feed_dict_G = {model.input['blur_img']: blur_img} G_out = model.G_output(feed_dict=feed_dict_G) cv2.imwrite('./test_result/'+str(i)+'_blur.png', (blur_img[0]+1.0)/2.0", "if (iter+1) % 50 == 0 or iter == (args.epoch-1): model.save_weights(args.checkpoint_dir, model.global_step) logging.info(\"[!]", "from __future__ import print_function import time import os import sys import logging import", "models.ops import * os.system('http_proxy_on') def linear_decay(initial=0.0001, step=0, start_step=150, end_step=300): ''' return decayed learning", "Discriminator Loss: %f, loss_disc: %f, gp_loss: %f', iter, i, loss_D, loss_disc, loss_gp) if", "current_value = max(0, initial - (update_step)*step_decay) return current_value def train(args): #assume there is", "import cgan from models.ops import * os.system('http_proxy_on') def linear_decay(initial=0.0001, step=0, start_step=150, end_step=300): '''", "%f',\\ iter, i, loss_G, adv_loss, perceptual_loss) #Ready for Training Discriminator for _ in", "logging.info('%d epoch, %d batch, Discriminator Loss: %f, loss_disc: %f, gp_loss: %f', iter, i,", "default='/data/private/data/GOPRO_Large/test/') parser.add_argument('--checkpoint_dir', type=str, default='./checkpoints/') parser.add_argument('--model_name', type=str, default='DeblurGAN.model') parser.add_argument('--summary_dir', type=str, default='./summaries/') parser.add_argument('--data_name', type=str, default='GOPRO')", "%d batch, Discriminator Loss: %f, loss_disc: %f, gp_loss: %f', iter, i, loss_D, loss_disc,", "parser.add_argument('--batch_num', type=int, default=1) parser.add_argument('--epoch', type=int, default=300) parser.add_argument('--data_path_train', type=str, default='/data/private/data/GOPRO_Large/train/') parser.add_argument('--data_path_test', type=str, default='/data/private/data/GOPRO_Large/test/') parser.add_argument('--checkpoint_dir',", "parser = argparse.ArgumentParser(description='') parser.add_argument('--iter_gen', type=int, default=1) 
parser.add_argument('--iter_disc', type=int, default=5) parser.add_argument('--batch_num', type=int, default=1) parser.add_argument('--epoch',", "= cgan(sess, args) model.build_model() model.sess.run(tf.global_variables_initializer()) model.load_weights(args.checkpoint_dir) for iter in range(args.epoch): learning_rate = linear_decay(0.0001,", "data in enumerate(dataset): if not os.path.exists('./test_result'): os.mkdir('./test_result') blur_img, real_img = loader.read_image_pair(data, resize_or_crop =", "loss_G, adv_loss, perceptual_loss) #Ready for Training Discriminator for _ in range(args.iter_disc): loss_D, loss_disc,", "= num_dataset/args.batch_num sess = tf.Session() model = cgan(sess, args) model.build_model() model.sess.run(tf.global_variables_initializer()) model.load_weights(args.checkpoint_dir) for", "action='store_true') args = parser.parse_args() log_format = '[%(asctime)s %(levelname)s] %(message)s' level = logging.DEBUG if", "cv2.imwrite('./test_result/'+str(i)+'_gen.png', (G_out[0]+1.0)/2.0*255.) 
logging.info(\"Deblur Image is saved (%d/%d) \", i, len(dataset)) logging.info(\"[*] test done\")", "default='DeblurGAN.model') parser.add_argument('--summary_dir', type=str, default='./summaries/') parser.add_argument('--data_name', type=str, default='GOPRO') parser.add_argument('--tf_image_monitor', type=bool, default=False) parser.add_argument('--resize_or_crop', type=str, default='resize')", "= loader.read_data_path(args.data_path_test, name=args.data_name) for i, data in enumerate(dataset): if not os.path.exists('./test_result'): os.mkdir('./test_result') blur_img,", "model.build_model() model.sess.run(tf.global_variables_initializer()) model.load_weights(args.checkpoint_dir) for iter in range(args.epoch): learning_rate = linear_decay(0.0001, iter) for i,", "args) model.build_model() model.sess.run(tf.global_variables_initializer()) model.load_weights(args.checkpoint_dir) for iter in range(args.epoch): learning_rate = linear_decay(0.0001, iter) for", "num_dataset = len(dataset) num_batch = num_dataset/args.batch_num sess = tf.Session() model = cgan(sess, args)", "i, loss_G, adv_loss, perceptual_loss) #Ready for Training Discriminator for _ in range(args.iter_disc): loss_D,", "parser.add_argument('--debug', action='store_true') args = parser.parse_args() log_format = '[%(asctime)s %(levelname)s] %(message)s' level = logging.DEBUG", "loss_gp = model.run_optim_D(feed_dict=feed_dict, with_image=args.tf_image_monitor) logging.info('%d epoch, %d batch, Discriminator Loss: %f, loss_disc: %f,", "iter, i, loss_G, adv_loss, perceptual_loss) #Ready for Training Discriminator for _ in range(args.iter_disc):", "default=256) parser.add_argument('--img_w', type=int, default=256) parser.add_argument('--img_c', type=int, default=3) parser.add_argument('--debug', action='store_true') args = parser.parse_args() log_format", "in enumerate(dataset): blur_img, real_img = loader.read_image_pair(data, resize_or_crop = args.resize_or_crop, image_size=(args.img_h, args.img_w)) 
feed_dict =", "is a batch data pair: dataset = loader.read_data_path(args.data_path_train, name=args.data_name) num_dataset = len(dataset) num_batch", "logging.info(\"[!] test started\") dataset = loader.read_data_path(args.data_path_test, name=args.data_name) for i, data in enumerate(dataset): if", "feed_dict = {model.input['blur_img']: blur_img,\\ model.input['real_img']: real_img,\\ model.learning_rate: learning_rate} loss_G, adv_loss, perceptual_loss = model.run_optim_G(feed_dict=feed_dict)", "model.run_optim_G(feed_dict=feed_dict) logging.info('%d epoch, %d batch, Generator Loss: %f, add loss: %f, perceptual_loss: %f',\\", "= len(dataset) num_batch = num_dataset/args.batch_num sess = tf.Session() model = cgan(sess, args) model.build_model()", "initial - (update_step)*step_decay) return current_value def train(args): #assume there is a batch data", "loader from models.cgan_model import cgan from models.ops import * os.system('http_proxy_on') def linear_decay(initial=0.0001, step=0,", "log_format = '[%(asctime)s %(levelname)s] %(message)s' level = logging.DEBUG if args.debug else logging.INFO logging.basicConfig(level=level,", "end_step=300): ''' return decayed learning rate It becomes 0 at end_step ''' decay_period", "data.data_loader as loader from models.cgan_model import cgan from models.ops import * os.system('http_proxy_on') def", "i, len(dataset)) logging.info(\"[*] test done\") if __name__ == '__main__': import argparse parser =", "as tf import numpy as np import cv2 import data.data_loader as loader from", "test done\") if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(description='') parser.add_argument('--iter_gen', type=int,", "''' return decayed learning rate It becomes 0 at end_step ''' decay_period =", "parser.add_argument('--tf_image_monitor', type=bool, default=False) parser.add_argument('--resize_or_crop', type=str, default='resize') parser.add_argument('--img_h', type=int, default=256) parser.add_argument('--img_w', 
type=int, default=256) parser.add_argument('--img_c',", "os import sys import logging import json import tensorflow as tf import numpy", "import tensorflow as tf import numpy as np import cv2 import data.data_loader as", "== (args.epoch-1): model.save_weights(args.checkpoint_dir, model.global_step) logging.info(\"[!] test started\") dataset = loader.read_data_path(args.data_path_test, name=args.data_name) for i,", "0 or iter == (args.epoch-1): model.save_weights(args.checkpoint_dir, model.global_step) logging.info(\"[!] test started\") dataset = loader.read_data_path(args.data_path_test,", "start_step=150, end_step=300): ''' return decayed learning rate It becomes 0 at end_step '''", "%f, add loss: %f, perceptual_loss: %f',\\ iter, i, loss_G, adv_loss, perceptual_loss) #Ready for", "parser.add_argument('--data_path_test', type=str, default='/data/private/data/GOPRO_Large/test/') parser.add_argument('--checkpoint_dir', type=str, default='./checkpoints/') parser.add_argument('--model_name', type=str, default='DeblurGAN.model') parser.add_argument('--summary_dir', type=str, default='./summaries/') parser.add_argument('--data_name',", "import numpy as np import cv2 import data.data_loader as loader from models.cgan_model import", "loss_gp) if (iter+1) % 50 == 0 or iter == (args.epoch-1): model.save_weights(args.checkpoint_dir, model.global_step)", "G_out = model.G_output(feed_dict=feed_dict_G) cv2.imwrite('./test_result/'+str(i)+'_blur.png', (blur_img[0]+1.0)/2.0 *255.) cv2.imwrite('./test_result/'+str(i)+'_real.png', (real_img[0]+1.0)/2.0 *255.) cv2.imwrite('./test_result/'+str(i)+'_gen.png', (G_out[0]+1.0)/2.0*255.) 
logging.info(\"Deblur Image", "print_function import time import os import sys import logging import json import tensorflow", "becomes 0 at end_step ''' decay_period = end_step - start_step step_decay = (initial-0.0)/decay_period", "parser.parse_args() log_format = '[%(asctime)s %(levelname)s] %(message)s' level = logging.DEBUG if args.debug else logging.INFO", "np import cv2 import data.data_loader as loader from models.cgan_model import cgan from models.ops", "- (update_step)*step_decay) return current_value def train(args): #assume there is a batch data pair:", "for iter in range(args.epoch): learning_rate = linear_decay(0.0001, iter) for i, data in enumerate(dataset):", "there is a batch data pair: dataset = loader.read_data_path(args.data_path_train, name=args.data_name) num_dataset = len(dataset)", "i, data in enumerate(dataset): blur_img, real_img = loader.read_image_pair(data, resize_or_crop = args.resize_or_crop, image_size=(args.img_h, args.img_w))", "data in enumerate(dataset): blur_img, real_img = loader.read_image_pair(data, resize_or_crop = args.resize_or_crop, image_size=(args.img_h, args.img_w)) feed_dict", "resize_or_crop = args.resize_or_crop, image_size=(args.img_h, args.img_w)) feed_dict_G = {model.input['blur_img']: blur_img} G_out = model.G_output(feed_dict=feed_dict_G) cv2.imwrite('./test_result/'+str(i)+'_blur.png',", "linear_decay(0.0001, iter) for i, data in enumerate(dataset): blur_img, real_img = loader.read_image_pair(data, resize_or_crop =", "% 50 == 0 or iter == (args.epoch-1): model.save_weights(args.checkpoint_dir, model.global_step) logging.info(\"[!] 
test started\")", "import data.data_loader as loader from models.cgan_model import cgan from models.ops import * os.system('http_proxy_on')", "import json import tensorflow as tf import numpy as np import cv2 import", "models.cgan_model import cgan from models.ops import * os.system('http_proxy_on') def linear_decay(initial=0.0001, step=0, start_step=150, end_step=300):", "_ in range(args.iter_disc): loss_D, loss_disc, loss_gp = model.run_optim_D(feed_dict=feed_dict, with_image=args.tf_image_monitor) logging.info('%d epoch, %d batch,", "batch, Generator Loss: %f, add loss: %f, perceptual_loss: %f',\\ iter, i, loss_G, adv_loss,", "%f, perceptual_loss: %f',\\ iter, i, loss_G, adv_loss, perceptual_loss) #Ready for Training Discriminator for", "dataset = loader.read_data_path(args.data_path_train, name=args.data_name) num_dataset = len(dataset) num_batch = num_dataset/args.batch_num sess = tf.Session()", "learning rate It becomes 0 at end_step ''' decay_period = end_step - start_step", "loss_disc: %f, gp_loss: %f', iter, i, loss_D, loss_disc, loss_gp) if (iter+1) % 50", "name=args.data_name) for i, data in enumerate(dataset): if not os.path.exists('./test_result'): os.mkdir('./test_result') blur_img, real_img =", "decay_period = end_step - start_step step_decay = (initial-0.0)/decay_period update_step = max(0, step-start_step) current_value", "50 == 0 or iter == (args.epoch-1): model.save_weights(args.checkpoint_dir, model.global_step) logging.info(\"[!] test started\") dataset", "(args.epoch-1): model.save_weights(args.checkpoint_dir, model.global_step) logging.info(\"[!] 
test started\") dataset = loader.read_data_path(args.data_path_test, name=args.data_name) for i, data", "type=str, default='./summaries/') parser.add_argument('--data_name', type=str, default='GOPRO') parser.add_argument('--tf_image_monitor', type=bool, default=False) parser.add_argument('--resize_or_crop', type=str, default='resize') parser.add_argument('--img_h', type=int,", "= loader.read_image_pair(data, resize_or_crop = args.resize_or_crop, image_size=(args.img_h, args.img_w)) feed_dict = {model.input['blur_img']: blur_img,\\ model.input['real_img']: real_img,\\", "Loss: %f, add loss: %f, perceptual_loss: %f',\\ iter, i, loss_G, adv_loss, perceptual_loss) #Ready", "model.G_output(feed_dict=feed_dict_G) cv2.imwrite('./test_result/'+str(i)+'_blur.png', (blur_img[0]+1.0)/2.0 *255.) cv2.imwrite('./test_result/'+str(i)+'_real.png', (real_img[0]+1.0)/2.0 *255.) cv2.imwrite('./test_result/'+str(i)+'_gen.png', (G_out[0]+1.0)/2.0*255.) logging.info(\"Deblur Image is saved", "Discriminator for _ in range(args.iter_disc): loss_D, loss_disc, loss_gp = model.run_optim_D(feed_dict=feed_dict, with_image=args.tf_image_monitor) logging.info('%d epoch,", "not os.path.exists('./test_result'): os.mkdir('./test_result') blur_img, real_img = loader.read_image_pair(data, resize_or_crop = args.resize_or_crop, image_size=(args.img_h, args.img_w)) feed_dict_G", "loss_D, loss_disc, loss_gp) if (iter+1) % 50 == 0 or iter == (args.epoch-1):", "type=str, default='/data/private/data/GOPRO_Large/train/') parser.add_argument('--data_path_test', type=str, default='/data/private/data/GOPRO_Large/test/') parser.add_argument('--checkpoint_dir', type=str, default='./checkpoints/') parser.add_argument('--model_name', type=str, default='DeblurGAN.model') parser.add_argument('--summary_dir', type=str,", "= parser.parse_args() log_format = '[%(asctime)s %(levelname)s] %(message)s' level = logging.DEBUG if args.debug else", "batch data pair: dataset = loader.read_data_path(args.data_path_train, 
name=args.data_name) num_dataset = len(dataset) num_batch = num_dataset/args.batch_num", "argparse parser = argparse.ArgumentParser(description='') parser.add_argument('--iter_gen', type=int, default=1) parser.add_argument('--iter_disc', type=int, default=5) parser.add_argument('--batch_num', type=int, default=1)", "i, data in enumerate(dataset): if not os.path.exists('./test_result'): os.mkdir('./test_result') blur_img, real_img = loader.read_image_pair(data, resize_or_crop", "range(args.iter_disc): loss_D, loss_disc, loss_gp = model.run_optim_D(feed_dict=feed_dict, with_image=args.tf_image_monitor) logging.info('%d epoch, %d batch, Discriminator Loss:", "as loader from models.cgan_model import cgan from models.ops import * os.system('http_proxy_on') def linear_decay(initial=0.0001,", "loader.read_data_path(args.data_path_test, name=args.data_name) for i, data in enumerate(dataset): if not os.path.exists('./test_result'): os.mkdir('./test_result') blur_img, real_img", "type=str, default='DeblurGAN.model') parser.add_argument('--summary_dir', type=str, default='./summaries/') parser.add_argument('--data_name', type=str, default='GOPRO') parser.add_argument('--tf_image_monitor', type=bool, default=False) parser.add_argument('--resize_or_crop', type=str,", "type=int, default=3) parser.add_argument('--debug', action='store_true') args = parser.parse_args() log_format = '[%(asctime)s %(levelname)s] %(message)s' level", "add loss: %f, perceptual_loss: %f',\\ iter, i, loss_G, adv_loss, perceptual_loss) #Ready for Training", "type=int, default=1) parser.add_argument('--epoch', type=int, default=300) parser.add_argument('--data_path_train', type=str, default='/data/private/data/GOPRO_Large/train/') parser.add_argument('--data_path_test', type=str, default='/data/private/data/GOPRO_Large/test/') parser.add_argument('--checkpoint_dir', type=str,", "with_image=args.tf_image_monitor) logging.info('%d epoch, %d batch, Discriminator Loss: %f, loss_disc: %f, gp_loss: %f', iter,", 
"default='./summaries/') parser.add_argument('--data_name', type=str, default='GOPRO') parser.add_argument('--tf_image_monitor', type=bool, default=False) parser.add_argument('--resize_or_crop', type=str, default='resize') parser.add_argument('--img_h', type=int, default=256)", "default=1) parser.add_argument('--iter_disc', type=int, default=5) parser.add_argument('--batch_num', type=int, default=1) parser.add_argument('--epoch', type=int, default=300) parser.add_argument('--data_path_train', type=str, default='/data/private/data/GOPRO_Large/train/')", "tensorflow as tf import numpy as np import cv2 import data.data_loader as loader", "sys import logging import json import tensorflow as tf import numpy as np", "iter) for i, data in enumerate(dataset): blur_img, real_img = loader.read_image_pair(data, resize_or_crop = args.resize_or_crop,", "loss_disc, loss_gp = model.run_optim_D(feed_dict=feed_dict, with_image=args.tf_image_monitor) logging.info('%d epoch, %d batch, Discriminator Loss: %f, loss_disc:", "(blur_img[0]+1.0)/2.0 *255.) cv2.imwrite('./test_result/'+str(i)+'_real.png', (real_img[0]+1.0)/2.0 *255.) cv2.imwrite('./test_result/'+str(i)+'_gen.png', (G_out[0]+1.0)/2.0*255.) logging.info(\"Deblur Image is saved (%d/%d) \",", "*255.) cv2.imwrite('./test_result/'+str(i)+'_gen.png', (G_out[0]+1.0)/2.0*255.) 
logging.info(\"Deblur Image is saved (%d/%d) \", i, len(dataset)) logging.info(\"[*] test", "type=int, default=5) parser.add_argument('--batch_num', type=int, default=1) parser.add_argument('--epoch', type=int, default=300) parser.add_argument('--data_path_train', type=str, default='/data/private/data/GOPRO_Large/train/') parser.add_argument('--data_path_test', type=str,", "- start_step step_decay = (initial-0.0)/decay_period update_step = max(0, step-start_step) current_value = max(0, initial", "logging.info(\"Deblur Image is saved (%d/%d) \", i, len(dataset)) logging.info(\"[*] test done\") if __name__", "gp_loss: %f', iter, i, loss_D, loss_disc, loss_gp) if (iter+1) % 50 == 0", "learning_rate} loss_G, adv_loss, perceptual_loss = model.run_optim_G(feed_dict=feed_dict) logging.info('%d epoch, %d batch, Generator Loss: %f,", "type=bool, default=False) parser.add_argument('--resize_or_crop', type=str, default='resize') parser.add_argument('--img_h', type=int, default=256) parser.add_argument('--img_w', type=int, default=256) parser.add_argument('--img_c', type=int,", "parser.add_argument('--summary_dir', type=str, default='./summaries/') parser.add_argument('--data_name', type=str, default='GOPRO') parser.add_argument('--tf_image_monitor', type=bool, default=False) parser.add_argument('--resize_or_crop', type=str, default='resize') parser.add_argument('--img_h',", "type=str, default='/data/private/data/GOPRO_Large/test/') parser.add_argument('--checkpoint_dir', type=str, default='./checkpoints/') parser.add_argument('--model_name', type=str, default='DeblurGAN.model') parser.add_argument('--summary_dir', type=str, default='./summaries/') parser.add_argument('--data_name', type=str,", "= max(0, step-start_step) current_value = max(0, initial - (update_step)*step_decay) return current_value def train(args):", "%f, loss_disc: %f, gp_loss: %f', iter, i, loss_D, loss_disc, loss_gp) if (iter+1) %", "perceptual_loss) #Ready for Training Discriminator for _ in 
range(args.iter_disc): loss_D, loss_disc, loss_gp =", "{model.input['blur_img']: blur_img} G_out = model.G_output(feed_dict=feed_dict_G) cv2.imwrite('./test_result/'+str(i)+'_blur.png', (blur_img[0]+1.0)/2.0 *255.) cv2.imwrite('./test_result/'+str(i)+'_real.png', (real_img[0]+1.0)/2.0 *255.) cv2.imwrite('./test_result/'+str(i)+'_gen.png', (G_out[0]+1.0)/2.0*255.)", "def train(args): #assume there is a batch data pair: dataset = loader.read_data_path(args.data_path_train, name=args.data_name)", "import print_function import time import os import sys import logging import json import", "%d batch, Generator Loss: %f, add loss: %f, perceptual_loss: %f',\\ iter, i, loss_G,", "in enumerate(dataset): if not os.path.exists('./test_result'): os.mkdir('./test_result') blur_img, real_img = loader.read_image_pair(data, resize_or_crop = args.resize_or_crop,", "cv2 import data.data_loader as loader from models.cgan_model import cgan from models.ops import *", "= loader.read_image_pair(data, resize_or_crop = args.resize_or_crop, image_size=(args.img_h, args.img_w)) feed_dict_G = {model.input['blur_img']: blur_img} G_out =", "default=1) parser.add_argument('--epoch', type=int, default=300) parser.add_argument('--data_path_train', type=str, default='/data/private/data/GOPRO_Large/train/') parser.add_argument('--data_path_test', type=str, default='/data/private/data/GOPRO_Large/test/') parser.add_argument('--checkpoint_dir', type=str, default='./checkpoints/')", "real_img = loader.read_image_pair(data, resize_or_crop = args.resize_or_crop, image_size=(args.img_h, args.img_w)) feed_dict = {model.input['blur_img']: blur_img,\\ model.input['real_img']:", "return current_value def train(args): #assume there is a batch data pair: dataset =", "in range(args.iter_disc): loss_D, loss_disc, loss_gp = model.run_optim_D(feed_dict=feed_dict, with_image=args.tf_image_monitor) logging.info('%d epoch, %d batch, Discriminator", "parser.add_argument('--epoch', type=int, default=300) 
parser.add_argument('--data_path_train', type=str, default='/data/private/data/GOPRO_Large/train/') parser.add_argument('--data_path_test', type=str, default='/data/private/data/GOPRO_Large/test/') parser.add_argument('--checkpoint_dir', type=str, default='./checkpoints/') parser.add_argument('--model_name',", "iter, i, loss_D, loss_disc, loss_gp) if (iter+1) % 50 == 0 or iter", "from models.ops import * os.system('http_proxy_on') def linear_decay(initial=0.0001, step=0, start_step=150, end_step=300): ''' return decayed", "type=str, default='./checkpoints/') parser.add_argument('--model_name', type=str, default='DeblurGAN.model') parser.add_argument('--summary_dir', type=str, default='./summaries/') parser.add_argument('--data_name', type=str, default='GOPRO') parser.add_argument('--tf_image_monitor', type=bool,", "model.learning_rate: learning_rate} loss_G, adv_loss, perceptual_loss = model.run_optim_G(feed_dict=feed_dict) logging.info('%d epoch, %d batch, Generator Loss:", "logging.info(\"[*] test done\") if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(description='') parser.add_argument('--iter_gen',", "image_size=(args.img_h, args.img_w)) feed_dict_G = {model.input['blur_img']: blur_img} G_out = model.G_output(feed_dict=feed_dict_G) cv2.imwrite('./test_result/'+str(i)+'_blur.png', (blur_img[0]+1.0)/2.0 *255.) 
cv2.imwrite('./test_result/'+str(i)+'_real.png',", "epoch, %d batch, Generator Loss: %f, add loss: %f, perceptual_loss: %f',\\ iter, i,", "for i, data in enumerate(dataset): if not os.path.exists('./test_result'): os.mkdir('./test_result') blur_img, real_img = loader.read_image_pair(data,", "Generator Loss: %f, add loss: %f, perceptual_loss: %f',\\ iter, i, loss_G, adv_loss, perceptual_loss)", "train(args): #assume there is a batch data pair: dataset = loader.read_data_path(args.data_path_train, name=args.data_name) num_dataset", "batch, Discriminator Loss: %f, loss_disc: %f, gp_loss: %f', iter, i, loss_D, loss_disc, loss_gp)", "default='resize') parser.add_argument('--img_h', type=int, default=256) parser.add_argument('--img_w', type=int, default=256) parser.add_argument('--img_c', type=int, default=3) parser.add_argument('--debug', action='store_true') args", "range(args.epoch): learning_rate = linear_decay(0.0001, iter) for i, data in enumerate(dataset): blur_img, real_img =", "as np import cv2 import data.data_loader as loader from models.cgan_model import cgan from", "= linear_decay(0.0001, iter) for i, data in enumerate(dataset): blur_img, real_img = loader.read_image_pair(data, resize_or_crop", "name=args.data_name) num_dataset = len(dataset) num_batch = num_dataset/args.batch_num sess = tf.Session() model = cgan(sess,", "end_step ''' decay_period = end_step - start_step step_decay = (initial-0.0)/decay_period update_step = max(0,", "test started\") dataset = loader.read_data_path(args.data_path_test, name=args.data_name) for i, data in enumerate(dataset): if not", "#assume there is a batch data pair: dataset = loader.read_data_path(args.data_path_train, name=args.data_name) num_dataset =", "model.global_step) logging.info(\"[!] 
test started\") dataset = loader.read_data_path(args.data_path_test, name=args.data_name) for i, data in enumerate(dataset):", "type=str, default='GOPRO') parser.add_argument('--tf_image_monitor', type=bool, default=False) parser.add_argument('--resize_or_crop', type=str, default='resize') parser.add_argument('--img_h', type=int, default=256) parser.add_argument('--img_w', type=int,", "image_size=(args.img_h, args.img_w)) feed_dict = {model.input['blur_img']: blur_img,\\ model.input['real_img']: real_img,\\ model.learning_rate: learning_rate} loss_G, adv_loss, perceptual_loss", "#Ready for Training Discriminator for _ in range(args.iter_disc): loss_D, loss_disc, loss_gp = model.run_optim_D(feed_dict=feed_dict,", "default=3) parser.add_argument('--debug', action='store_true') args = parser.parse_args() log_format = '[%(asctime)s %(levelname)s] %(message)s' level =", "type=int, default=300) parser.add_argument('--data_path_train', type=str, default='/data/private/data/GOPRO_Large/train/') parser.add_argument('--data_path_test', type=str, default='/data/private/data/GOPRO_Large/test/') parser.add_argument('--checkpoint_dir', type=str, default='./checkpoints/') parser.add_argument('--model_name', type=str,", "loader.read_image_pair(data, resize_or_crop = args.resize_or_crop, image_size=(args.img_h, args.img_w)) feed_dict_G = {model.input['blur_img']: blur_img} G_out = model.G_output(feed_dict=feed_dict_G)", "decayed learning rate It becomes 0 at end_step ''' decay_period = end_step -", "model = cgan(sess, args) model.build_model() model.sess.run(tf.global_variables_initializer()) model.load_weights(args.checkpoint_dir) for iter in range(args.epoch): learning_rate =", "* os.system('http_proxy_on') def linear_decay(initial=0.0001, step=0, start_step=150, end_step=300): ''' return decayed learning rate It", "'__main__': import argparse parser = argparse.ArgumentParser(description='') parser.add_argument('--iter_gen', type=int, default=1) parser.add_argument('--iter_disc', 
type=int, default=5) parser.add_argument('--batch_num',", "__future__ import print_function import time import os import sys import logging import json", "epoch, %d batch, Discriminator Loss: %f, loss_disc: %f, gp_loss: %f', iter, i, loss_D,", "end_step - start_step step_decay = (initial-0.0)/decay_period update_step = max(0, step-start_step) current_value = max(0,", "saved (%d/%d) \", i, len(dataset)) logging.info(\"[*] test done\") if __name__ == '__main__': import", "= end_step - start_step step_decay = (initial-0.0)/decay_period update_step = max(0, step-start_step) current_value =", "parser.add_argument('--data_name', type=str, default='GOPRO') parser.add_argument('--tf_image_monitor', type=bool, default=False) parser.add_argument('--resize_or_crop', type=str, default='resize') parser.add_argument('--img_h', type=int, default=256) parser.add_argument('--img_w',", "enumerate(dataset): if not os.path.exists('./test_result'): os.mkdir('./test_result') blur_img, real_img = loader.read_image_pair(data, resize_or_crop = args.resize_or_crop, image_size=(args.img_h,", "''' decay_period = end_step - start_step step_decay = (initial-0.0)/decay_period update_step = max(0, step-start_step)", "loader.read_image_pair(data, resize_or_crop = args.resize_or_crop, image_size=(args.img_h, args.img_w)) feed_dict = {model.input['blur_img']: blur_img,\\ model.input['real_img']: real_img,\\ model.learning_rate:", "at end_step ''' decay_period = end_step - start_step step_decay = (initial-0.0)/decay_period update_step =", "logging import json import tensorflow as tf import numpy as np import cv2", "loss: %f, perceptual_loss: %f',\\ iter, i, loss_G, adv_loss, perceptual_loss) #Ready for Training Discriminator", "= {model.input['blur_img']: blur_img} G_out = model.G_output(feed_dict=feed_dict_G) cv2.imwrite('./test_result/'+str(i)+'_blur.png', (blur_img[0]+1.0)/2.0 *255.) cv2.imwrite('./test_result/'+str(i)+'_real.png', (real_img[0]+1.0)/2.0 *255.) 
cv2.imwrite('./test_result/'+str(i)+'_gen.png',", "json import tensorflow as tf import numpy as np import cv2 import data.data_loader", "len(dataset) num_batch = num_dataset/args.batch_num sess = tf.Session() model = cgan(sess, args) model.build_model() model.sess.run(tf.global_variables_initializer())" ]
[ "boto3 class SSMClient: def __init__(self, config, **client_kwargs): self.session = boto3.Session(**client_kwargs) self.ssm_config = config", "else: params = self.client().describe_parameters()['Parameters'] return [key['Name'] for key in params] def put(self, key,", "self.session = boto3.Session(**client_kwargs) self.ssm_config = config self.profile_name = 'default' def client(self): return self.session.client('ssm')", "config self.profile_name = 'default' def client(self): return self.session.client('ssm') def get(self, name): return self.client().get_parameter(", "[ parameters_path, ] }, ])['Parameters'] else: params = self.client().describe_parameters()['Parameters'] return [key['Name'] for key", "}, ])['Parameters'] else: params = self.client().describe_parameters()['Parameters'] return [key['Name'] for key in params] def", "put(self, key, value, description): self.client().put_parameter( Name=key, Description=description, Value=value, Type='SecureString' if self.ssm_config.should_auto_encrypt_secret_value() else 'String'", "self.session.client('ssm') def get(self, name): return self.client().get_parameter( Name=name, WithDecryption=self.ssm_config.should_auto_decrypt_secret_value() )['Parameter']['Value'] def describe(self, parameters_path=''): if", "{ 'Key': 'Path', 'Values': [ parameters_path, ] }, ])['Parameters'] else: params = self.client().describe_parameters()['Parameters']", "'Path', 'Values': [ parameters_path, ] }, ])['Parameters'] else: params = self.client().describe_parameters()['Parameters'] return [key['Name']", "'Key': 'Path', 'Values': [ parameters_path, ] }, ])['Parameters'] else: params = self.client().describe_parameters()['Parameters'] return", "params = self.client().describe_parameters()['Parameters'] return [key['Name'] for key in params] def put(self, key, value,", "return self.session.client('ssm') def get(self, name): return self.client().get_parameter( Name=name, WithDecryption=self.ssm_config.should_auto_decrypt_secret_value() 
)['Parameter']['Value'] def describe(self, parameters_path=''):", "self.ssm_config = config self.profile_name = 'default' def client(self): return self.session.client('ssm') def get(self, name):", "get(self, name): return self.client().get_parameter( Name=name, WithDecryption=self.ssm_config.should_auto_decrypt_secret_value() )['Parameter']['Value'] def describe(self, parameters_path=''): if parameters_path: params", "class SSMClient: def __init__(self, config, **client_kwargs): self.session = boto3.Session(**client_kwargs) self.ssm_config = config self.profile_name", "params = self.client().describe_parameters(ParameterFilters=[ { 'Key': 'Path', 'Values': [ parameters_path, ] }, ])['Parameters'] else:", "= 'default' def client(self): return self.session.client('ssm') def get(self, name): return self.client().get_parameter( Name=name, WithDecryption=self.ssm_config.should_auto_decrypt_secret_value()", "params] def put(self, key, value, description): self.client().put_parameter( Name=key, Description=description, Value=value, Type='SecureString' if self.ssm_config.should_auto_encrypt_secret_value()", "describe(self, parameters_path=''): if parameters_path: params = self.client().describe_parameters(ParameterFilters=[ { 'Key': 'Path', 'Values': [ parameters_path,", "**client_kwargs): self.session = boto3.Session(**client_kwargs) self.ssm_config = config self.profile_name = 'default' def client(self): return", "= config self.profile_name = 'default' def client(self): return self.session.client('ssm') def get(self, name): return", "__init__(self, config, **client_kwargs): self.session = boto3.Session(**client_kwargs) self.ssm_config = config self.profile_name = 'default' def", "Name=name, WithDecryption=self.ssm_config.should_auto_decrypt_secret_value() )['Parameter']['Value'] def describe(self, parameters_path=''): if parameters_path: params = self.client().describe_parameters(ParameterFilters=[ { 'Key':", "'Values': [ parameters_path, ] }, ])['Parameters'] else: 
params = self.client().describe_parameters()['Parameters'] return [key['Name'] for", "parameters_path, ] }, ])['Parameters'] else: params = self.client().describe_parameters()['Parameters'] return [key['Name'] for key in", "def get(self, name): return self.client().get_parameter( Name=name, WithDecryption=self.ssm_config.should_auto_decrypt_secret_value() )['Parameter']['Value'] def describe(self, parameters_path=''): if parameters_path:", "for key in params] def put(self, key, value, description): self.client().put_parameter( Name=key, Description=description, Value=value,", "in params] def put(self, key, value, description): self.client().put_parameter( Name=key, Description=description, Value=value, Type='SecureString' if", "config, **client_kwargs): self.session = boto3.Session(**client_kwargs) self.ssm_config = config self.profile_name = 'default' def client(self):", "key in params] def put(self, key, value, description): self.client().put_parameter( Name=key, Description=description, Value=value, Type='SecureString'", ")['Parameter']['Value'] def describe(self, parameters_path=''): if parameters_path: params = self.client().describe_parameters(ParameterFilters=[ { 'Key': 'Path', 'Values':", "client(self): return self.session.client('ssm') def get(self, name): return self.client().get_parameter( Name=name, WithDecryption=self.ssm_config.should_auto_decrypt_secret_value() )['Parameter']['Value'] def describe(self,", "<reponame>eyalstoler/ssm-simple-cli import boto3 class SSMClient: def __init__(self, config, **client_kwargs): self.session = boto3.Session(**client_kwargs) self.ssm_config", "key, value, description): self.client().put_parameter( Name=key, Description=description, Value=value, Type='SecureString' if self.ssm_config.should_auto_encrypt_secret_value() else 'String' )", "[key['Name'] for key in params] def put(self, key, value, description): self.client().put_parameter( Name=key, Description=description,", "])['Parameters'] else: params = 
self.client().describe_parameters()['Parameters'] return [key['Name'] for key in params] def put(self,", "'default' def client(self): return self.session.client('ssm') def get(self, name): return self.client().get_parameter( Name=name, WithDecryption=self.ssm_config.should_auto_decrypt_secret_value() )['Parameter']['Value']", "self.client().describe_parameters()['Parameters'] return [key['Name'] for key in params] def put(self, key, value, description): self.client().put_parameter(", "return [key['Name'] for key in params] def put(self, key, value, description): self.client().put_parameter( Name=key,", "parameters_path=''): if parameters_path: params = self.client().describe_parameters(ParameterFilters=[ { 'Key': 'Path', 'Values': [ parameters_path, ]", "return self.client().get_parameter( Name=name, WithDecryption=self.ssm_config.should_auto_decrypt_secret_value() )['Parameter']['Value'] def describe(self, parameters_path=''): if parameters_path: params = self.client().describe_parameters(ParameterFilters=[", "boto3.Session(**client_kwargs) self.ssm_config = config self.profile_name = 'default' def client(self): return self.session.client('ssm') def get(self,", "import boto3 class SSMClient: def __init__(self, config, **client_kwargs): self.session = boto3.Session(**client_kwargs) self.ssm_config =", "self.profile_name = 'default' def client(self): return self.session.client('ssm') def get(self, name): return self.client().get_parameter( Name=name,", "WithDecryption=self.ssm_config.should_auto_decrypt_secret_value() )['Parameter']['Value'] def describe(self, parameters_path=''): if parameters_path: params = self.client().describe_parameters(ParameterFilters=[ { 'Key': 'Path',", "parameters_path: params = self.client().describe_parameters(ParameterFilters=[ { 'Key': 'Path', 'Values': [ parameters_path, ] }, ])['Parameters']", "def __init__(self, config, **client_kwargs): self.session = boto3.Session(**client_kwargs) self.ssm_config = config self.profile_name = 
'default'", "def client(self): return self.session.client('ssm') def get(self, name): return self.client().get_parameter( Name=name, WithDecryption=self.ssm_config.should_auto_decrypt_secret_value() )['Parameter']['Value'] def", "name): return self.client().get_parameter( Name=name, WithDecryption=self.ssm_config.should_auto_decrypt_secret_value() )['Parameter']['Value'] def describe(self, parameters_path=''): if parameters_path: params =", "def describe(self, parameters_path=''): if parameters_path: params = self.client().describe_parameters(ParameterFilters=[ { 'Key': 'Path', 'Values': [", "SSMClient: def __init__(self, config, **client_kwargs): self.session = boto3.Session(**client_kwargs) self.ssm_config = config self.profile_name =", "self.client().get_parameter( Name=name, WithDecryption=self.ssm_config.should_auto_decrypt_secret_value() )['Parameter']['Value'] def describe(self, parameters_path=''): if parameters_path: params = self.client().describe_parameters(ParameterFilters=[ {", "if parameters_path: params = self.client().describe_parameters(ParameterFilters=[ { 'Key': 'Path', 'Values': [ parameters_path, ] },", "= self.client().describe_parameters(ParameterFilters=[ { 'Key': 'Path', 'Values': [ parameters_path, ] }, ])['Parameters'] else: params", "] }, ])['Parameters'] else: params = self.client().describe_parameters()['Parameters'] return [key['Name'] for key in params]", "= self.client().describe_parameters()['Parameters'] return [key['Name'] for key in params] def put(self, key, value, description):", "self.client().describe_parameters(ParameterFilters=[ { 'Key': 'Path', 'Values': [ parameters_path, ] }, ])['Parameters'] else: params =", "= boto3.Session(**client_kwargs) self.ssm_config = config self.profile_name = 'default' def client(self): return self.session.client('ssm') def", "def put(self, key, value, description): self.client().put_parameter( Name=key, Description=description, Value=value, Type='SecureString' if 
self.ssm_config.should_auto_encrypt_secret_value() else" ]
[ "os.path.isdir(semversioner_path): os.makedirs(semversioner_path) next_release_path = os.path.join(semversioner_path, 'next-release') if not os.path.isdir(next_release_path): os.makedirs(next_release_path) self.path = path", "in the 0.x.y range may introduce breaking changes. {% for release in releases", "values: major, minor, patch. description (str): Change description. Returns ------- path : str", "import os import sys import json import click import datetime from distutils.version import", "release_type): \"\"\" Returns a string like '1.0.0'. \"\"\" # Convert to a list", "release(self): \"\"\" Performs the release. The method performs the release by taking everything", "for filename in os.listdir(next_release_dir): full_path = os.path.join(next_release_dir, filename) os.remove(full_path) os.rmdir(next_release_dir) return { 'previous_version':", "change_type (str): Change type. Allowed values: major, minor, patch. description (str): Change description.", "+ '.json')) as f: data = json.load(f) data = sorted(data, key=lambda k: k['type']", "full_path = os.path.join(next_release_dir, filename) with open(full_path) as f: changes.append(json.load(f)) if len(changes) == 0:", "Previous version. new_version : str New version. \"\"\" changes = [] next_release_dir =", "deprecated = True semversioner_path = semversioner_path_legacy if not os.path.isdir(semversioner_path): os.makedirs(semversioner_path) next_release_path = os.path.join(semversioner_path,", "0: click.secho(\"Error: No changes to release. 
Skipping release process.\", fg='red') sys.exit(-1) current_version_number =", "version_parts[0] += 1 version_parts[1] = 0 version_parts[2] = 0 return '.'.join(str(i) for i", "-> %s\" % (current_version_number, next_version_number)) release_json_filename = os.path.join(self.semversioner_path, '%s.json' % next_version_number) click.echo(\"Generated '\"", "release.data %} - {{ data.type }}: {{ data.description }} {% endfor %} {%", "changes = [] next_release_dir = self.next_release_path for filename in os.listdir(next_release_dir): full_path = os.path.join(next_release_dir,", "len(releases) > 0: return releases[0] return INITIAL_VERSION def _sorted_releases(self): files = [f for", "0 elif release_type == 'major': version_parts[0] += 1 version_parts[1] = 0 version_parts[2] =", "changes, current_version_number): release_type = sorted(list(map(lambda x: x['type'], changes)))[0] return self._increase_version(current_version_number, release_type) def _increase_version(self,", "INITIAL_VERSION def _sorted_releases(self): files = [f for f in os.listdir(self.semversioner_path) if os.path.isfile(os.path.join(self.semversioner_path, f))]", "current_version_number = self.get_version() next_version_number = self._get_next_version_number(changes, current_version_number) click.echo(\"Releasing version: %s -> %s\" %", "os.path.join(next_release_dir, filename) os.remove(full_path) os.rmdir(next_release_dir) return { 'previous_version': current_version_number, 'new_version': next_version_number } def get_version(self):", "semversioner_path = semversioner_path_new deprecated = False if os.path.isdir(semversioner_path_legacy) and not os.path.isdir(semversioner_path_new): deprecated =", "Absolute path of the file generated. \"\"\" parsed_values = { 'type': change_type, 'description':", "x: x['type'], changes)))[0] return self._increase_version(current_version_number, release_type) def _increase_version(self, current_version, release_type): \"\"\" Returns a", "the changelog. 
The method generates the changelog based on the template file defined", "breaking changes. {% for release in releases %} ## {{ release.id }} {%", "= False if os.path.isdir(semversioner_path_legacy) and not os.path.isdir(semversioner_path_new): deprecated = True semversioner_path = semversioner_path_legacy", "click.echo(\"Removing '\" + next_release_dir + \"' directory.\") for filename in os.listdir(next_release_dir): full_path =", "open(full_path) as f: changes.append(json.load(f)) if len(changes) == 0: click.secho(\"Error: No changes to release.", "} def get_version(self): \"\"\" Gets the current version. \"\"\" releases = self._sorted_releases() if", "> 0: return releases[0] return INITIAL_VERSION def _sorted_releases(self): files = [f for f", "by taking everything in ``next-release`` folder and aggregating all together in a single", "% next_version_number) click.echo(\"Generated '\" + release_json_filename + \"' file.\") with open(release_json_filename, 'w') as", "the template file defined in ``DEFAULT_TEMPLATE``. Returns ------- str Changelog string. \"\"\" releases", "JSON file for that release (e.g ``1.12.0.json``). The JSON file generated is a", "current_version.split('.')) if release_type == 'patch': version_parts[2] += 1 elif release_type == 'minor': version_parts[1]", "from distutils.version import StrictVersion from jinja2 import Template ROOTDIR = os.getcwd() INITIAL_VERSION =", "self._sorted_releases(): with open(os.path.join(self.semversioner_path, release_identifier + '.json')) as f: data = json.load(f) data =", "for filename in os.listdir(next_release_dir): full_path = os.path.join(next_release_dir, filename) with open(full_path) as f: changes.append(json.load(f))", "\"\"\" Generates the changelog. The method generates the changelog based on the template", "that release (e.g ``1.12.0.json``). The JSON file generated is a list of all", "with the type and description provided. Parameters ------- change_type (str): Change type. 
Allowed", "release_json_filename = os.path.join(self.semversioner_path, '%s.json' % next_version_number) click.echo(\"Generated '\" + release_json_filename + \"' file.\")", "x[:-len('.json')], files)) releases = sorted(releases, key=StrictVersion, reverse=True) return releases def _get_next_version_number(self, changes, current_version_number):", "release_type) def _increase_version(self, current_version, release_type): \"\"\" Returns a string like '1.0.0'. \"\"\" #", "current_version_number) click.echo(\"Releasing version: %s -> %s\" % (current_version_number, next_version_number)) release_json_filename = os.path.join(self.semversioner_path, '%s.json'", "deprecated = False if os.path.isdir(semversioner_path_legacy) and not os.path.isdir(semversioner_path_new): deprecated = True semversioner_path =", "Template(DEFAULT_TEMPLATE, trim_blocks=True).render(releases=releases) def release(self): \"\"\" Performs the release. The method performs the release", "JSON files from ``next-release``. After aggregating the files, it removes the ``next-release`` directory.", "== 'major': version_parts[0] += 1 version_parts[1] = 0 version_parts[2] = 0 return '.'.join(str(i)", "(current_version_number, next_version_number)) release_json_filename = os.path.join(self.semversioner_path, '%s.json' % next_version_number) click.echo(\"Generated '\" + release_json_filename +", "os.path.join(semversioner_path, 'next-release') if not os.path.isdir(next_release_path): os.makedirs(next_release_path) self.path = path self.semversioner_path = semversioner_path self.next_release_path", "------- path : str Absolute path of the file generated. 
\"\"\" parsed_values =", "+ \"\\n\") return { 'path': os.path.join(self.next_release_path, filename) } def generate_changelog(self): \"\"\" Generates the", "while (filename is None or os.path.isfile(os.path.join(self.next_release_path, filename))): filename = '{type_name}-{datetime}.json'.format( type_name=parsed_values['type'], datetime=\"{:%Y%m%d%H%M%S}\".format(datetime.datetime.utcnow())) with", "the file generated. \"\"\" parsed_values = { 'type': change_type, 'description': description, } filename", "os.getcwd() INITIAL_VERSION = '0.0.0' DEFAULT_TEMPLATE = \"\"\"# Changelog Note: version releases in the", "and aggregating all together in a single JSON file for that release (e.g", "\"\"\" # Convert to a list of ints: [1, 0, 0]. version_parts =", "not os.path.isdir(next_release_path): os.makedirs(next_release_path) self.path = path self.semversioner_path = semversioner_path self.next_release_path = next_release_path self.deprecated", "f: f.write(json.dumps(changes, indent=2, sort_keys=True)) click.echo(\"Removing '\" + next_release_dir + \"' directory.\") for filename", "``next-release``. After aggregating the files, it removes the ``next-release`` directory. Returns ------- previous_version", "os.path.isdir(semversioner_path_legacy) and not os.path.isdir(semversioner_path_new): deprecated = True semversioner_path = semversioner_path_legacy if not os.path.isdir(semversioner_path):", "changes. 
{% for release in releases %} ## {{ release.id }} {% for", "f.write(json.dumps(changes, indent=2, sort_keys=True)) click.echo(\"Removing '\" + next_release_dir + \"' directory.\") for filename in", "True semversioner_path = semversioner_path_legacy if not os.path.isdir(semversioner_path): os.makedirs(semversioner_path) next_release_path = os.path.join(semversioner_path, 'next-release') if", "f.write(json.dumps(parsed_values, indent=2) + \"\\n\") return { 'path': os.path.join(self.next_release_path, filename) } def generate_changelog(self): \"\"\"", "self._sorted_releases() if len(releases) > 0: return releases[0] return INITIAL_VERSION def _sorted_releases(self): files =", "in os.listdir(next_release_dir): full_path = os.path.join(next_release_dir, filename) with open(full_path) as f: changes.append(json.load(f)) if len(changes)", ": str New version. \"\"\" changes = [] next_release_dir = self.next_release_path for filename", "datetime from distutils.version import StrictVersion from jinja2 import Template ROOTDIR = os.getcwd() INITIAL_VERSION", "= os.path.join(next_release_dir, filename) os.remove(full_path) os.rmdir(next_release_dir) return { 'previous_version': current_version_number, 'new_version': next_version_number } def", "``next-release`` directory. Returns ------- previous_version : str Previous version. new_version : str New", "1 version_parts[1] = 0 version_parts[2] = 0 return '.'.join(str(i) for i in version_parts)", "in releases %} ## {{ release.id }} {% for data in release.data %}", "= os.path.join(semversioner_path, 'next-release') if not os.path.isdir(next_release_path): os.makedirs(next_release_path) self.path = path self.semversioner_path = semversioner_path", "to release. Skipping release process.\", fg='red') sys.exit(-1) current_version_number = self.get_version() next_version_number = self._get_next_version_number(changes,", "release. 
The method performs the release by taking everything in ``next-release`` folder and", "from jinja2 import Template ROOTDIR = os.getcwd() INITIAL_VERSION = '0.0.0' DEFAULT_TEMPLATE = \"\"\"#", "sys.exit(-1) current_version_number = self.get_version() next_version_number = self._get_next_version_number(changes, current_version_number) click.echo(\"Releasing version: %s -> %s\"", "of all the individual JSON files from ``next-release``. After aggregating the files, it", "return self._increase_version(current_version_number, release_type) def _increase_version(self, current_version, release_type): \"\"\" Returns a string like '1.0.0'.", "release (e.g ``1.12.0.json``). The JSON file generated is a list of all the", "if os.path.isdir(semversioner_path_legacy) and not os.path.isdir(semversioner_path_new): deprecated = True semversioner_path = semversioner_path_legacy if not", "release in releases %} ## {{ release.id }} {% for data in release.data", "Returns ------- previous_version : str Previous version. new_version : str New version. \"\"\"", "files = [f for f in os.listdir(self.semversioner_path) if os.path.isfile(os.path.join(self.semversioner_path, f))] releases = list(map(lambda", "single JSON file for that release (e.g ``1.12.0.json``). The JSON file generated is", "version releases in the 0.x.y range may introduce breaking changes. {% for release", "Convert to a list of ints: [1, 0, 0]. version_parts = list(int(i) for", "in a single JSON file for that release (e.g ``1.12.0.json``). The JSON file", "a new changeset file. 
The method creates a new json file in the", "for release_identifier in self._sorted_releases(): with open(os.path.join(self.semversioner_path, release_identifier + '.json')) as f: data =", "x: x[:-len('.json')], files)) releases = sorted(releases, key=StrictVersion, reverse=True) return releases def _get_next_version_number(self, changes,", "DEFAULT_TEMPLATE = \"\"\"# Changelog Note: version releases in the 0.x.y range may introduce", "{% for data in release.data %} - {{ data.type }}: {{ data.description }}", "self.deprecated def add_change(self, change_type, description): \"\"\" Create a new changeset file. The method", "return releases[0] return INITIAL_VERSION def _sorted_releases(self): files = [f for f in os.listdir(self.semversioner_path)", "(str): Change type. Allowed values: major, minor, patch. description (str): Change description. Returns", "'description': description, } filename = None while (filename is None or os.path.isfile(os.path.join(self.next_release_path, filename))):", "(filename is None or os.path.isfile(os.path.join(self.next_release_path, filename))): filename = '{type_name}-{datetime}.json'.format( type_name=parsed_values['type'], datetime=\"{:%Y%m%d%H%M%S}\".format(datetime.datetime.utcnow())) with open(os.path.join(self.next_release_path,", "for f in os.listdir(self.semversioner_path) if os.path.isfile(os.path.join(self.semversioner_path, f))] releases = list(map(lambda x: x[:-len('.json')], files))", "as f: changes.append(json.load(f)) if len(changes) == 0: click.secho(\"Error: No changes to release. Skipping", "'w') as f: f.write(json.dumps(parsed_values, indent=2) + \"\\n\") return { 'path': os.path.join(self.next_release_path, filename) }", "the ``.semversioner/next-release/`` directory with the type and description provided. 
Parameters ------- change_type (str):", "open(os.path.join(self.semversioner_path, release_identifier + '.json')) as f: data = json.load(f) data = sorted(data, key=lambda", "if not os.path.isdir(next_release_path): os.makedirs(next_release_path) self.path = path self.semversioner_path = semversioner_path self.next_release_path = next_release_path", "file generated is a list of all the individual JSON files from ``next-release``.", "changes)))[0] return self._increase_version(current_version_number, release_type) def _increase_version(self, current_version, release_type): \"\"\" Returns a string like", "data}) return Template(DEFAULT_TEMPLATE, trim_blocks=True).render(releases=releases) def release(self): \"\"\" Performs the release. The method performs", "k['description']) releases.append({'id': release_identifier, 'data': data}) return Template(DEFAULT_TEMPLATE, trim_blocks=True).render(releases=releases) def release(self): \"\"\" Performs the", "changeset file. The method creates a new json file in the ``.semversioner/next-release/`` directory", "file defined in ``DEFAULT_TEMPLATE``. Returns ------- str Changelog string. \"\"\" releases = []", "} filename = None while (filename is None or os.path.isfile(os.path.join(self.next_release_path, filename))): filename =", "key=lambda k: k['type'] + k['description']) releases.append({'id': release_identifier, 'data': data}) return Template(DEFAULT_TEMPLATE, trim_blocks=True).render(releases=releases) def", "release. Skipping release process.\", fg='red') sys.exit(-1) current_version_number = self.get_version() next_version_number = self._get_next_version_number(changes, current_version_number)", "i in current_version.split('.')) if release_type == 'patch': version_parts[2] += 1 elif release_type ==", "Changelog Note: version releases in the 0.x.y range may introduce breaking changes. 
{%", "= \"\"\"# Changelog Note: version releases in the 0.x.y range may introduce breaking", "jinja2 import Template ROOTDIR = os.getcwd() INITIAL_VERSION = '0.0.0' DEFAULT_TEMPLATE = \"\"\"# Changelog", "0: return releases[0] return INITIAL_VERSION def _sorted_releases(self): files = [f for f in", "self.next_release_path for filename in os.listdir(next_release_dir): full_path = os.path.join(next_release_dir, filename) with open(full_path) as f:", "next_version_number) click.echo(\"Generated '\" + release_json_filename + \"' file.\") with open(release_json_filename, 'w') as f:", "version. \"\"\" changes = [] next_release_dir = self.next_release_path for filename in os.listdir(next_release_dir): full_path", "self.get_version() next_version_number = self._get_next_version_number(changes, current_version_number) click.echo(\"Releasing version: %s -> %s\" % (current_version_number, next_version_number))", "indent=2, sort_keys=True)) click.echo(\"Removing '\" + next_release_dir + \"' directory.\") for filename in os.listdir(next_release_dir):", "+ next_release_dir + \"' directory.\") for filename in os.listdir(next_release_dir): full_path = os.path.join(next_release_dir, filename)", "directory. Returns ------- previous_version : str Previous version. new_version : str New version.", "type. Allowed values: major, minor, patch. description (str): Change description. Returns ------- path", "fg='red') sys.exit(-1) current_version_number = self.get_version() next_version_number = self._get_next_version_number(changes, current_version_number) click.echo(\"Releasing version: %s ->", "The JSON file generated is a list of all the individual JSON files", "Change type. Allowed values: major, minor, patch. description (str): Change description. Returns -------", "open(release_json_filename, 'w') as f: f.write(json.dumps(changes, indent=2, sort_keys=True)) click.echo(\"Removing '\" + next_release_dir + \"'", "on the template file defined in ``DEFAULT_TEMPLATE``. 
Returns ------- str Changelog string. \"\"\"", "[1, 0, 0]. version_parts = list(int(i) for i in current_version.split('.')) if release_type ==", "get_version(self): \"\"\" Gets the current version. \"\"\" releases = self._sorted_releases() if len(releases) >", "for that release (e.g ``1.12.0.json``). The JSON file generated is a list of", "parsed_values = { 'type': change_type, 'description': description, } filename = None while (filename", "= '0.0.0' DEFAULT_TEMPLATE = \"\"\"# Changelog Note: version releases in the 0.x.y range", "is None or os.path.isfile(os.path.join(self.next_release_path, filename))): filename = '{type_name}-{datetime}.json'.format( type_name=parsed_values['type'], datetime=\"{:%Y%m%d%H%M%S}\".format(datetime.datetime.utcnow())) with open(os.path.join(self.next_release_path, filename),", "False if os.path.isdir(semversioner_path_legacy) and not os.path.isdir(semversioner_path_new): deprecated = True semversioner_path = semversioner_path_legacy if", "return { 'path': os.path.join(self.next_release_path, filename) } def generate_changelog(self): \"\"\" Generates the changelog. The", "= path self.semversioner_path = semversioner_path self.next_release_path = next_release_path self.deprecated = deprecated def is_deprecated(self):", "self._get_next_version_number(changes, current_version_number) click.echo(\"Releasing version: %s -> %s\" % (current_version_number, next_version_number)) release_json_filename = os.path.join(self.semversioner_path,", "new_version : str New version. 
\"\"\" changes = [] next_release_dir = self.next_release_path for", "return releases def _get_next_version_number(self, changes, current_version_number): release_type = sorted(list(map(lambda x: x['type'], changes)))[0] return", "list(map(lambda x: x[:-len('.json')], files)) releases = sorted(releases, key=StrictVersion, reverse=True) return releases def _get_next_version_number(self,", "in current_version.split('.')) if release_type == 'patch': version_parts[2] += 1 elif release_type == 'minor':", "%} - {{ data.type }}: {{ data.description }} {% endfor %} {% endfor", "of ints: [1, 0, 0]. version_parts = list(int(i) for i in current_version.split('.')) if", "\"' directory.\") for filename in os.listdir(next_release_dir): full_path = os.path.join(next_release_dir, filename) os.remove(full_path) os.rmdir(next_release_dir) return", "version_parts[2] += 1 elif release_type == 'minor': version_parts[1] += 1 version_parts[2] = 0", "path : str Absolute path of the file generated. \"\"\" parsed_values = {", "to a list of ints: [1, 0, 0]. version_parts = list(int(i) for i", "version_parts[2] = 0 elif release_type == 'major': version_parts[0] += 1 version_parts[1] = 0", "1 elif release_type == 'minor': version_parts[1] += 1 version_parts[2] = 0 elif release_type", "+ release_json_filename + \"' file.\") with open(release_json_filename, 'w') as f: f.write(json.dumps(changes, indent=2, sort_keys=True))", "filename = '{type_name}-{datetime}.json'.format( type_name=parsed_values['type'], datetime=\"{:%Y%m%d%H%M%S}\".format(datetime.datetime.utcnow())) with open(os.path.join(self.next_release_path, filename), 'w') as f: f.write(json.dumps(parsed_values, indent=2)", "current_version_number, 'new_version': next_version_number } def get_version(self): \"\"\" Gets the current version. \"\"\" releases", "------- previous_version : str Previous version. new_version : str New version. \"\"\" changes", "changes to release. 
Skipping release process.\", fg='red') sys.exit(-1) current_version_number = self.get_version() next_version_number =", "in ``DEFAULT_TEMPLATE``. Returns ------- str Changelog string. \"\"\" releases = [] for release_identifier", "list of all the individual JSON files from ``next-release``. After aggregating the files,", "def add_change(self, change_type, description): \"\"\" Create a new changeset file. The method creates", "``.semversioner/next-release/`` directory with the type and description provided. Parameters ------- change_type (str): Change", "str New version. \"\"\" changes = [] next_release_dir = self.next_release_path for filename in", "\"\"\"# Changelog Note: version releases in the 0.x.y range may introduce breaking changes.", "Allowed values: major, minor, patch. description (str): Change description. Returns ------- path :", "def _sorted_releases(self): files = [f for f in os.listdir(self.semversioner_path) if os.path.isfile(os.path.join(self.semversioner_path, f))] releases", "distutils.version import StrictVersion from jinja2 import Template ROOTDIR = os.getcwd() INITIAL_VERSION = '0.0.0'", "individual JSON files from ``next-release``. After aggregating the files, it removes the ``next-release``", "releases = self._sorted_releases() if len(releases) > 0: return releases[0] return INITIAL_VERSION def _sorted_releases(self):", "trim_blocks=True).render(releases=releases) def release(self): \"\"\" Performs the release. 
The method performs the release by", "not os.path.isdir(semversioner_path_new): deprecated = True semversioner_path = semversioner_path_legacy if not os.path.isdir(semversioner_path): os.makedirs(semversioner_path) next_release_path", "{{ data.type }}: {{ data.description }} {% endfor %} {% endfor %} \"\"\"", "not os.path.isdir(semversioner_path): os.makedirs(semversioner_path) next_release_path = os.path.join(semversioner_path, 'next-release') if not os.path.isdir(next_release_path): os.makedirs(next_release_path) self.path =", "New version. \"\"\" changes = [] next_release_dir = self.next_release_path for filename in os.listdir(next_release_dir):", "with open(os.path.join(self.semversioner_path, release_identifier + '.json')) as f: data = json.load(f) data = sorted(data,", "click.echo(\"Releasing version: %s -> %s\" % (current_version_number, next_version_number)) release_json_filename = os.path.join(self.semversioner_path, '%s.json' %", "= None while (filename is None or os.path.isfile(os.path.join(self.next_release_path, filename))): filename = '{type_name}-{datetime}.json'.format( type_name=parsed_values['type'],", "os.listdir(next_release_dir): full_path = os.path.join(next_release_dir, filename) with open(full_path) as f: changes.append(json.load(f)) if len(changes) ==", "= semversioner_path_new deprecated = False if os.path.isdir(semversioner_path_legacy) and not os.path.isdir(semversioner_path_new): deprecated = True", "%} \"\"\" class Semversioner: def __init__(self, path=ROOTDIR): semversioner_path_legacy = os.path.join(path, '.changes') semversioner_path_new =", "with open(release_json_filename, 'w') as f: f.write(json.dumps(changes, indent=2, sort_keys=True)) click.echo(\"Removing '\" + next_release_dir +", "'\" + next_release_dir + \"' directory.\") for filename in os.listdir(next_release_dir): full_path = os.path.join(next_release_dir,", "version. new_version : str New version. 
\"\"\" changes = [] next_release_dir = self.next_release_path", "+ \"' directory.\") for filename in os.listdir(next_release_dir): full_path = os.path.join(next_release_dir, filename) os.remove(full_path) os.rmdir(next_release_dir)", "'type': change_type, 'description': description, } filename = None while (filename is None or", "the individual JSON files from ``next-release``. After aggregating the files, it removes the", "semversioner_path_legacy = os.path.join(path, '.changes') semversioner_path_new = os.path.join(path, '.semversioner') semversioner_path = semversioner_path_new deprecated =", "self.deprecated = deprecated def is_deprecated(self): return self.deprecated def add_change(self, change_type, description): \"\"\" Create", "\"\\n\") return { 'path': os.path.join(self.next_release_path, filename) } def generate_changelog(self): \"\"\" Generates the changelog.", "like '1.0.0'. \"\"\" # Convert to a list of ints: [1, 0, 0].", "``next-release`` folder and aggregating all together in a single JSON file for that", "releases = [] for release_identifier in self._sorted_releases(): with open(os.path.join(self.semversioner_path, release_identifier + '.json')) as", "json.load(f) data = sorted(data, key=lambda k: k['type'] + k['description']) releases.append({'id': release_identifier, 'data': data})", "generate_changelog(self): \"\"\" Generates the changelog. The method generates the changelog based on the", "str Absolute path of the file generated. \"\"\" parsed_values = { 'type': change_type,", "'w') as f: f.write(json.dumps(changes, indent=2, sort_keys=True)) click.echo(\"Removing '\" + next_release_dir + \"' directory.\")", "template file defined in ``DEFAULT_TEMPLATE``. Returns ------- str Changelog string. \"\"\" releases =", "{% endfor %} {% endfor %} \"\"\" class Semversioner: def __init__(self, path=ROOTDIR): semversioner_path_legacy", "it removes the ``next-release`` directory. Returns ------- previous_version : str Previous version. 
new_version", "def is_deprecated(self): return self.deprecated def add_change(self, change_type, description): \"\"\" Create a new changeset", "change_type, 'description': description, } filename = None while (filename is None or os.path.isfile(os.path.join(self.next_release_path,", "} def generate_changelog(self): \"\"\" Generates the changelog. The method generates the changelog based", "def generate_changelog(self): \"\"\" Generates the changelog. The method generates the changelog based on", "os.path.isfile(os.path.join(self.next_release_path, filename))): filename = '{type_name}-{datetime}.json'.format( type_name=parsed_values['type'], datetime=\"{:%Y%m%d%H%M%S}\".format(datetime.datetime.utcnow())) with open(os.path.join(self.next_release_path, filename), 'w') as f:", "process.\", fg='red') sys.exit(-1) current_version_number = self.get_version() next_version_number = self._get_next_version_number(changes, current_version_number) click.echo(\"Releasing version: %s", "with open(os.path.join(self.next_release_path, filename), 'w') as f: f.write(json.dumps(parsed_values, indent=2) + \"\\n\") return { 'path':", "``DEFAULT_TEMPLATE``. Returns ------- str Changelog string. \"\"\" releases = [] for release_identifier in", "in the ``.semversioner/next-release/`` directory with the type and description provided. Parameters ------- change_type", "add_change(self, change_type, description): \"\"\" Create a new changeset file. The method creates a", "import StrictVersion from jinja2 import Template ROOTDIR = os.getcwd() INITIAL_VERSION = '0.0.0' DEFAULT_TEMPLATE", "may introduce breaking changes. {% for release in releases %} ## {{ release.id", "the current version. \"\"\" releases = self._sorted_releases() if len(releases) > 0: return releases[0]", "a single JSON file for that release (e.g ``1.12.0.json``). 
The JSON file generated", "if not os.path.isdir(semversioner_path): os.makedirs(semversioner_path) next_release_path = os.path.join(semversioner_path, 'next-release') if not os.path.isdir(next_release_path): os.makedirs(next_release_path) self.path", "= self._get_next_version_number(changes, current_version_number) click.echo(\"Releasing version: %s -> %s\" % (current_version_number, next_version_number)) release_json_filename =", "release_json_filename + \"' file.\") with open(release_json_filename, 'w') as f: f.write(json.dumps(changes, indent=2, sort_keys=True)) click.echo(\"Removing", "the type and description provided. Parameters ------- change_type (str): Change type. Allowed values:", "os.path.join(self.next_release_path, filename) } def generate_changelog(self): \"\"\" Generates the changelog. The method generates the", "next_version_number = self._get_next_version_number(changes, current_version_number) click.echo(\"Releasing version: %s -> %s\" % (current_version_number, next_version_number)) release_json_filename", "Semversioner: def __init__(self, path=ROOTDIR): semversioner_path_legacy = os.path.join(path, '.changes') semversioner_path_new = os.path.join(path, '.semversioner') semversioner_path", "def _increase_version(self, current_version, release_type): \"\"\" Returns a string like '1.0.0'. \"\"\" # Convert", "directory.\") for filename in os.listdir(next_release_dir): full_path = os.path.join(next_release_dir, filename) os.remove(full_path) os.rmdir(next_release_dir) return {", "everything in ``next-release`` folder and aggregating all together in a single JSON file", "= semversioner_path_legacy if not os.path.isdir(semversioner_path): os.makedirs(semversioner_path) next_release_path = os.path.join(semversioner_path, 'next-release') if not os.path.isdir(next_release_path):", "generated. 
\"\"\" parsed_values = { 'type': change_type, 'description': description, } filename = None", "in os.listdir(next_release_dir): full_path = os.path.join(next_release_dir, filename) os.remove(full_path) os.rmdir(next_release_dir) return { 'previous_version': current_version_number, 'new_version':", "k['type'] + k['description']) releases.append({'id': release_identifier, 'data': data}) return Template(DEFAULT_TEMPLATE, trim_blocks=True).render(releases=releases) def release(self): \"\"\"", "releases.append({'id': release_identifier, 'data': data}) return Template(DEFAULT_TEMPLATE, trim_blocks=True).render(releases=releases) def release(self): \"\"\" Performs the release.", "elif release_type == 'minor': version_parts[1] += 1 version_parts[2] = 0 elif release_type ==", "= deprecated def is_deprecated(self): return self.deprecated def add_change(self, change_type, description): \"\"\" Create a", "}}: {{ data.description }} {% endfor %} {% endfor %} \"\"\" class Semversioner:", "``1.12.0.json``). The JSON file generated is a list of all the individual JSON", "os.listdir(self.semversioner_path) if os.path.isfile(os.path.join(self.semversioner_path, f))] releases = list(map(lambda x: x[:-len('.json')], files)) releases = sorted(releases,", "f))] releases = list(map(lambda x: x[:-len('.json')], files)) releases = sorted(releases, key=StrictVersion, reverse=True) return", "minor, patch. description (str): Change description. Returns ------- path : str Absolute path", "Performs the release. 
The method performs the release by taking everything in ``next-release``", "import json import click import datetime from distutils.version import StrictVersion from jinja2 import", "os.path.join(self.semversioner_path, '%s.json' % next_version_number) click.echo(\"Generated '\" + release_json_filename + \"' file.\") with open(release_json_filename,", "datetime=\"{:%Y%m%d%H%M%S}\".format(datetime.datetime.utcnow())) with open(os.path.join(self.next_release_path, filename), 'w') as f: f.write(json.dumps(parsed_values, indent=2) + \"\\n\") return {", "major, minor, patch. description (str): Change description. Returns ------- path : str Absolute", "'.changes') semversioner_path_new = os.path.join(path, '.semversioner') semversioner_path = semversioner_path_new deprecated = False if os.path.isdir(semversioner_path_legacy)", "Create a new changeset file. The method creates a new json file in", "key=StrictVersion, reverse=True) return releases def _get_next_version_number(self, changes, current_version_number): release_type = sorted(list(map(lambda x: x['type'],", "click.secho(\"Error: No changes to release. Skipping release process.\", fg='red') sys.exit(-1) current_version_number = self.get_version()", "Returns ------- str Changelog string. \"\"\" releases = [] for release_identifier in self._sorted_releases():", "Gets the current version. 
\"\"\" releases = self._sorted_releases() if len(releases) > 0: return", "%} ## {{ release.id }} {% for data in release.data %} - {{", "next_release_path self.deprecated = deprecated def is_deprecated(self): return self.deprecated def add_change(self, change_type, description): \"\"\"", "'%s.json' % next_version_number) click.echo(\"Generated '\" + release_json_filename + \"' file.\") with open(release_json_filename, 'w')", "f: f.write(json.dumps(parsed_values, indent=2) + \"\\n\") return { 'path': os.path.join(self.next_release_path, filename) } def generate_changelog(self):", "self.next_release_path = next_release_path self.deprecated = deprecated def is_deprecated(self): return self.deprecated def add_change(self, change_type,", "% (current_version_number, next_version_number)) release_json_filename = os.path.join(self.semversioner_path, '%s.json' % next_version_number) click.echo(\"Generated '\" + release_json_filename", "= 0 elif release_type == 'major': version_parts[0] += 1 version_parts[1] = 0 version_parts[2]", "new json file in the ``.semversioner/next-release/`` directory with the type and description provided.", "%s -> %s\" % (current_version_number, next_version_number)) release_json_filename = os.path.join(self.semversioner_path, '%s.json' % next_version_number) click.echo(\"Generated", "folder and aggregating all together in a single JSON file for that release", "together in a single JSON file for that release (e.g ``1.12.0.json``). The JSON", "%} {% endfor %} \"\"\" class Semversioner: def __init__(self, path=ROOTDIR): semversioner_path_legacy = os.path.join(path,", "version: %s -> %s\" % (current_version_number, next_version_number)) release_json_filename = os.path.join(self.semversioner_path, '%s.json' % next_version_number)", "= json.load(f) data = sorted(data, key=lambda k: k['type'] + k['description']) releases.append({'id': release_identifier, 'data':", "files, it removes the ``next-release`` directory. 
Returns ------- previous_version : str Previous version.", "def _get_next_version_number(self, changes, current_version_number): release_type = sorted(list(map(lambda x: x['type'], changes)))[0] return self._increase_version(current_version_number, release_type)", "description, } filename = None while (filename is None or os.path.isfile(os.path.join(self.next_release_path, filename))): filename", "= '{type_name}-{datetime}.json'.format( type_name=parsed_values['type'], datetime=\"{:%Y%m%d%H%M%S}\".format(datetime.datetime.utcnow())) with open(os.path.join(self.next_release_path, filename), 'w') as f: f.write(json.dumps(parsed_values, indent=2) +", "performs the release by taking everything in ``next-release`` folder and aggregating all together", "string like '1.0.0'. \"\"\" # Convert to a list of ints: [1, 0,", "The method performs the release by taking everything in ``next-release`` folder and aggregating", "+= 1 version_parts[2] = 0 elif release_type == 'major': version_parts[0] += 1 version_parts[1]", "from ``next-release``. After aggregating the files, it removes the ``next-release`` directory. Returns -------", "and not os.path.isdir(semversioner_path_new): deprecated = True semversioner_path = semversioner_path_legacy if not os.path.isdir(semversioner_path): os.makedirs(semversioner_path)", "_sorted_releases(self): files = [f for f in os.listdir(self.semversioner_path) if os.path.isfile(os.path.join(self.semversioner_path, f))] releases =", "next_version_number } def get_version(self): \"\"\" Gets the current version. \"\"\" releases = self._sorted_releases()", "a string like '1.0.0'. 
\"\"\" # Convert to a list of ints: [1,", "JSON file generated is a list of all the individual JSON files from", "self.path = path self.semversioner_path = semversioner_path self.next_release_path = next_release_path self.deprecated = deprecated def", "{ 'previous_version': current_version_number, 'new_version': next_version_number } def get_version(self): \"\"\" Gets the current version.", "(e.g ``1.12.0.json``). The JSON file generated is a list of all the individual", "with open(full_path) as f: changes.append(json.load(f)) if len(changes) == 0: click.secho(\"Error: No changes to", "= os.path.join(path, '.semversioner') semversioner_path = semversioner_path_new deprecated = False if os.path.isdir(semversioner_path_legacy) and not", "= [] for release_identifier in self._sorted_releases(): with open(os.path.join(self.semversioner_path, release_identifier + '.json')) as f:", "\"\"\" Performs the release. The method performs the release by taking everything in", "description. Returns ------- path : str Absolute path of the file generated. \"\"\"", "== 'patch': version_parts[2] += 1 elif release_type == 'minor': version_parts[1] += 1 version_parts[2]", "\"\"\" releases = self._sorted_releases() if len(releases) > 0: return releases[0] return INITIAL_VERSION def", "based on the template file defined in ``DEFAULT_TEMPLATE``. Returns ------- str Changelog string.", "## {{ release.id }} {% for data in release.data %} - {{ data.type", "creates a new json file in the ``.semversioner/next-release/`` directory with the type and", "method generates the changelog based on the template file defined in ``DEFAULT_TEMPLATE``. Returns", ": str Previous version. new_version : str New version. 
\"\"\" changes = []", "f in os.listdir(self.semversioner_path) if os.path.isfile(os.path.join(self.semversioner_path, f))] releases = list(map(lambda x: x[:-len('.json')], files)) releases", "release_identifier, 'data': data}) return Template(DEFAULT_TEMPLATE, trim_blocks=True).render(releases=releases) def release(self): \"\"\" Performs the release. The", "= list(int(i) for i in current_version.split('.')) if release_type == 'patch': version_parts[2] += 1", "releases[0] return INITIAL_VERSION def _sorted_releases(self): files = [f for f in os.listdir(self.semversioner_path) if", "all together in a single JSON file for that release (e.g ``1.12.0.json``). The", "generated is a list of all the individual JSON files from ``next-release``. After", "'\" + release_json_filename + \"' file.\") with open(release_json_filename, 'w') as f: f.write(json.dumps(changes, indent=2,", "json import click import datetime from distutils.version import StrictVersion from jinja2 import Template", "'new_version': next_version_number } def get_version(self): \"\"\" Gets the current version. \"\"\" releases =", "== 'minor': version_parts[1] += 1 version_parts[2] = 0 elif release_type == 'major': version_parts[0]", "f: changes.append(json.load(f)) if len(changes) == 0: click.secho(\"Error: No changes to release. Skipping release", "{% endfor %} \"\"\" class Semversioner: def __init__(self, path=ROOTDIR): semversioner_path_legacy = os.path.join(path, '.changes')", "None while (filename is None or os.path.isfile(os.path.join(self.next_release_path, filename))): filename = '{type_name}-{datetime}.json'.format( type_name=parsed_values['type'], datetime=\"{:%Y%m%d%H%M%S}\".format(datetime.datetime.utcnow()))", "as f: data = json.load(f) data = sorted(data, key=lambda k: k['type'] + k['description'])", "'data': data}) return Template(DEFAULT_TEMPLATE, trim_blocks=True).render(releases=releases) def release(self): \"\"\" Performs the release. 
The method", "'previous_version': current_version_number, 'new_version': next_version_number } def get_version(self): \"\"\" Gets the current version. \"\"\"", "data.type }}: {{ data.description }} {% endfor %} {% endfor %} \"\"\" class", "filename), 'w') as f: f.write(json.dumps(parsed_values, indent=2) + \"\\n\") return { 'path': os.path.join(self.next_release_path, filename)", "sort_keys=True)) click.echo(\"Removing '\" + next_release_dir + \"' directory.\") for filename in os.listdir(next_release_dir): full_path", "if release_type == 'patch': version_parts[2] += 1 elif release_type == 'minor': version_parts[1] +=", ": str Absolute path of the file generated. \"\"\" parsed_values = { 'type':", "directory with the type and description provided. Parameters ------- change_type (str): Change type.", "releases in the 0.x.y range may introduce breaking changes. {% for release in", "len(changes) == 0: click.secho(\"Error: No changes to release. Skipping release process.\", fg='red') sys.exit(-1)", "in ``next-release`` folder and aggregating all together in a single JSON file for", "import Template ROOTDIR = os.getcwd() INITIAL_VERSION = '0.0.0' DEFAULT_TEMPLATE = \"\"\"# Changelog Note:", "data in release.data %} - {{ data.type }}: {{ data.description }} {% endfor", "semversioner_path_new deprecated = False if os.path.isdir(semversioner_path_legacy) and not os.path.isdir(semversioner_path_new): deprecated = True semversioner_path", "import click import datetime from distutils.version import StrictVersion from jinja2 import Template ROOTDIR", "k: k['type'] + k['description']) releases.append({'id': release_identifier, 'data': data}) return Template(DEFAULT_TEMPLATE, trim_blocks=True).render(releases=releases) def release(self):", "is_deprecated(self): return self.deprecated def add_change(self, change_type, description): \"\"\" Create a new changeset file.", "%s\" % (current_version_number, next_version_number)) release_json_filename = 
os.path.join(self.semversioner_path, '%s.json' % next_version_number) click.echo(\"Generated '\" +", "the ``next-release`` directory. Returns ------- previous_version : str Previous version. new_version : str", "= [] next_release_dir = self.next_release_path for filename in os.listdir(next_release_dir): full_path = os.path.join(next_release_dir, filename)", "release_type == 'patch': version_parts[2] += 1 elif release_type == 'minor': version_parts[1] += 1", "'.semversioner') semversioner_path = semversioner_path_new deprecated = False if os.path.isdir(semversioner_path_legacy) and not os.path.isdir(semversioner_path_new): deprecated", "_increase_version(self, current_version, release_type): \"\"\" Returns a string like '1.0.0'. \"\"\" # Convert to", "description provided. Parameters ------- change_type (str): Change type. Allowed values: major, minor, patch.", "as f: f.write(json.dumps(changes, indent=2, sort_keys=True)) click.echo(\"Removing '\" + next_release_dir + \"' directory.\") for", "releases = sorted(releases, key=StrictVersion, reverse=True) return releases def _get_next_version_number(self, changes, current_version_number): release_type =", "sorted(list(map(lambda x: x['type'], changes)))[0] return self._increase_version(current_version_number, release_type) def _increase_version(self, current_version, release_type): \"\"\" Returns", "os.path.isdir(semversioner_path_new): deprecated = True semversioner_path = semversioner_path_legacy if not os.path.isdir(semversioner_path): os.makedirs(semversioner_path) next_release_path =", "open(os.path.join(self.next_release_path, filename), 'w') as f: f.write(json.dumps(parsed_values, indent=2) + \"\\n\") return { 'path': os.path.join(self.next_release_path,", "= sorted(data, key=lambda k: k['type'] + k['description']) releases.append({'id': release_identifier, 'data': data}) return Template(DEFAULT_TEMPLATE,", "release by taking everything in ``next-release`` folder and aggregating all together in a", "sorted(data, 
key=lambda k: k['type'] + k['description']) releases.append({'id': release_identifier, 'data': data}) return Template(DEFAULT_TEMPLATE, trim_blocks=True).render(releases=releases)", "file in the ``.semversioner/next-release/`` directory with the type and description provided. Parameters -------", "method performs the release by taking everything in ``next-release`` folder and aggregating all", "filename) } def generate_changelog(self): \"\"\" Generates the changelog. The method generates the changelog", "\"\"\" Create a new changeset file. The method creates a new json file", "0.x.y range may introduce breaking changes. {% for release in releases %} ##", "The method generates the changelog based on the template file defined in ``DEFAULT_TEMPLATE``.", "------- change_type (str): Change type. Allowed values: major, minor, patch. description (str): Change", "\"\"\" parsed_values = { 'type': change_type, 'description': description, } filename = None while", "a new json file in the ``.semversioner/next-release/`` directory with the type and description", "range may introduce breaking changes. {% for release in releases %} ## {{", "string. \"\"\" releases = [] for release_identifier in self._sorted_releases(): with open(os.path.join(self.semversioner_path, release_identifier +", "release_type == 'minor': version_parts[1] += 1 version_parts[2] = 0 elif release_type == 'major':", "provided. Parameters ------- change_type (str): Change type. Allowed values: major, minor, patch. description", "file for that release (e.g ``1.12.0.json``). The JSON file generated is a list", "filename) os.remove(full_path) os.rmdir(next_release_dir) return { 'previous_version': current_version_number, 'new_version': next_version_number } def get_version(self): \"\"\"", "json file in the ``.semversioner/next-release/`` directory with the type and description provided. 
Parameters", "filename in os.listdir(next_release_dir): full_path = os.path.join(next_release_dir, filename) with open(full_path) as f: changes.append(json.load(f)) if", "path of the file generated. \"\"\" parsed_values = { 'type': change_type, 'description': description,", "path self.semversioner_path = semversioner_path self.next_release_path = next_release_path self.deprecated = deprecated def is_deprecated(self): return", "+= 1 elif release_type == 'minor': version_parts[1] += 1 version_parts[2] = 0 elif", "change_type, description): \"\"\" Create a new changeset file. The method creates a new", "Skipping release process.\", fg='red') sys.exit(-1) current_version_number = self.get_version() next_version_number = self._get_next_version_number(changes, current_version_number) click.echo(\"Releasing", "'patch': version_parts[2] += 1 elif release_type == 'minor': version_parts[1] += 1 version_parts[2] =", "= list(map(lambda x: x[:-len('.json')], files)) releases = sorted(releases, key=StrictVersion, reverse=True) return releases def", "removes the ``next-release`` directory. Returns ------- previous_version : str Previous version. new_version :", "\"\"\" Gets the current version. \"\"\" releases = self._sorted_releases() if len(releases) > 0:", "After aggregating the files, it removes the ``next-release`` directory. 
Returns ------- previous_version :", "release_identifier in self._sorted_releases(): with open(os.path.join(self.semversioner_path, release_identifier + '.json')) as f: data = json.load(f)", "def __init__(self, path=ROOTDIR): semversioner_path_legacy = os.path.join(path, '.changes') semversioner_path_new = os.path.join(path, '.semversioner') semversioner_path =", "in release.data %} - {{ data.type }}: {{ data.description }} {% endfor %}", "os.path.isfile(os.path.join(self.semversioner_path, f))] releases = list(map(lambda x: x[:-len('.json')], files)) releases = sorted(releases, key=StrictVersion, reverse=True)", "click import datetime from distutils.version import StrictVersion from jinja2 import Template ROOTDIR =", "for release in releases %} ## {{ release.id }} {% for data in", "new changeset file. The method creates a new json file in the ``.semversioner/next-release/``", "import sys import json import click import datetime from distutils.version import StrictVersion from", "releases def _get_next_version_number(self, changes, current_version_number): release_type = sorted(list(map(lambda x: x['type'], changes)))[0] return self._increase_version(current_version_number,", "if os.path.isfile(os.path.join(self.semversioner_path, f))] releases = list(map(lambda x: x[:-len('.json')], files)) releases = sorted(releases, key=StrictVersion,", "= self.next_release_path for filename in os.listdir(next_release_dir): full_path = os.path.join(next_release_dir, filename) with open(full_path) as", "path=ROOTDIR): semversioner_path_legacy = os.path.join(path, '.changes') semversioner_path_new = os.path.join(path, '.semversioner') semversioner_path = semversioner_path_new deprecated", "type_name=parsed_values['type'], datetime=\"{:%Y%m%d%H%M%S}\".format(datetime.datetime.utcnow())) with open(os.path.join(self.next_release_path, filename), 'w') as f: f.write(json.dumps(parsed_values, indent=2) + \"\\n\") return", "os.remove(full_path) os.rmdir(next_release_dir) return { 
'previous_version': current_version_number, 'new_version': next_version_number } def get_version(self): \"\"\" Gets", "str Changelog string. \"\"\" releases = [] for release_identifier in self._sorted_releases(): with open(os.path.join(self.semversioner_path,", "ints: [1, 0, 0]. version_parts = list(int(i) for i in current_version.split('.')) if release_type", "def get_version(self): \"\"\" Gets the current version. \"\"\" releases = self._sorted_releases() if len(releases)", "f: data = json.load(f) data = sorted(data, key=lambda k: k['type'] + k['description']) releases.append({'id':", "os.path.join(next_release_dir, filename) with open(full_path) as f: changes.append(json.load(f)) if len(changes) == 0: click.secho(\"Error: No", "os.rmdir(next_release_dir) return { 'previous_version': current_version_number, 'new_version': next_version_number } def get_version(self): \"\"\" Gets the", "semversioner_path_new = os.path.join(path, '.semversioner') semversioner_path = semversioner_path_new deprecated = False if os.path.isdir(semversioner_path_legacy) and", "if len(changes) == 0: click.secho(\"Error: No changes to release. 
Skipping release process.\", fg='red')", "'{type_name}-{datetime}.json'.format( type_name=parsed_values['type'], datetime=\"{:%Y%m%d%H%M%S}\".format(datetime.datetime.utcnow())) with open(os.path.join(self.next_release_path, filename), 'w') as f: f.write(json.dumps(parsed_values, indent=2) + \"\\n\")", "'major': version_parts[0] += 1 version_parts[1] = 0 version_parts[2] = 0 return '.'.join(str(i) for", "os.path.isdir(next_release_path): os.makedirs(next_release_path) self.path = path self.semversioner_path = semversioner_path self.next_release_path = next_release_path self.deprecated =", "filename))): filename = '{type_name}-{datetime}.json'.format( type_name=parsed_values['type'], datetime=\"{:%Y%m%d%H%M%S}\".format(datetime.datetime.utcnow())) with open(os.path.join(self.next_release_path, filename), 'w') as f: f.write(json.dumps(parsed_values,", "taking everything in ``next-release`` folder and aggregating all together in a single JSON", "version_parts = list(int(i) for i in current_version.split('.')) if release_type == 'patch': version_parts[2] +=", "in os.listdir(self.semversioner_path) if os.path.isfile(os.path.join(self.semversioner_path, f))] releases = list(map(lambda x: x[:-len('.json')], files)) releases =", "'.json')) as f: data = json.load(f) data = sorted(data, key=lambda k: k['type'] +", "the release. The method performs the release by taking everything in ``next-release`` folder", "patch. description (str): Change description. 
Returns ------- path : str Absolute path of", "+= 1 version_parts[1] = 0 version_parts[2] = 0 return '.'.join(str(i) for i in", "semversioner_path_legacy if not os.path.isdir(semversioner_path): os.makedirs(semversioner_path) next_release_path = os.path.join(semversioner_path, 'next-release') if not os.path.isdir(next_release_path): os.makedirs(next_release_path)", "The method creates a new json file in the ``.semversioner/next-release/`` directory with the", "sys import json import click import datetime from distutils.version import StrictVersion from jinja2", "os.listdir(next_release_dir): full_path = os.path.join(next_release_dir, filename) os.remove(full_path) os.rmdir(next_release_dir) return { 'previous_version': current_version_number, 'new_version': next_version_number", "data.description }} {% endfor %} {% endfor %} \"\"\" class Semversioner: def __init__(self,", "'0.0.0' DEFAULT_TEMPLATE = \"\"\"# Changelog Note: version releases in the 0.x.y range may", "------- str Changelog string. \"\"\" releases = [] for release_identifier in self._sorted_releases(): with", "+ k['description']) releases.append({'id': release_identifier, 'data': data}) return Template(DEFAULT_TEMPLATE, trim_blocks=True).render(releases=releases) def release(self): \"\"\" Performs", "if len(releases) > 0: return releases[0] return INITIAL_VERSION def _sorted_releases(self): files = [f", "\"\"\" changes = [] next_release_dir = self.next_release_path for filename in os.listdir(next_release_dir): full_path =", "type and description provided. Parameters ------- change_type (str): Change type. Allowed values: major,", "Note: version releases in the 0.x.y range may introduce breaking changes. {% for", "the 0.x.y range may introduce breaking changes. 
{% for release in releases %}", "indent=2) + \"\\n\") return { 'path': os.path.join(self.next_release_path, filename) } def generate_changelog(self): \"\"\" Generates", "{ 'path': os.path.join(self.next_release_path, filename) } def generate_changelog(self): \"\"\" Generates the changelog. The method", "{{ release.id }} {% for data in release.data %} - {{ data.type }}:", "is a list of all the individual JSON files from ``next-release``. After aggregating", "Parameters ------- change_type (str): Change type. Allowed values: major, minor, patch. description (str):", "for i in current_version.split('.')) if release_type == 'patch': version_parts[2] += 1 elif release_type", "filename in os.listdir(next_release_dir): full_path = os.path.join(next_release_dir, filename) os.remove(full_path) os.rmdir(next_release_dir) return { 'previous_version': current_version_number,", "Returns ------- path : str Absolute path of the file generated. \"\"\" parsed_values", "def release(self): \"\"\" Performs the release. The method performs the release by taking", "== 0: click.secho(\"Error: No changes to release. Skipping release process.\", fg='red') sys.exit(-1) current_version_number", "next_release_dir + \"' directory.\") for filename in os.listdir(next_release_dir): full_path = os.path.join(next_release_dir, filename) os.remove(full_path)", "current version. \"\"\" releases = self._sorted_releases() if len(releases) > 0: return releases[0] return", "Change description. Returns ------- path : str Absolute path of the file generated.", "for data in release.data %} - {{ data.type }}: {{ data.description }} {%", "Returns a string like '1.0.0'. \"\"\" # Convert to a list of ints:", "'1.0.0'. \"\"\" # Convert to a list of ints: [1, 0, 0]. version_parts", "file generated. 
\"\"\" parsed_values = { 'type': change_type, 'description': description, } filename =", "next_version_number)) release_json_filename = os.path.join(self.semversioner_path, '%s.json' % next_version_number) click.echo(\"Generated '\" + release_json_filename + \"'", "return Template(DEFAULT_TEMPLATE, trim_blocks=True).render(releases=releases) def release(self): \"\"\" Performs the release. The method performs the", "next_release_path = os.path.join(semversioner_path, 'next-release') if not os.path.isdir(next_release_path): os.makedirs(next_release_path) self.path = path self.semversioner_path =", "= sorted(list(map(lambda x: x['type'], changes)))[0] return self._increase_version(current_version_number, release_type) def _increase_version(self, current_version, release_type): \"\"\"", "\"\"\" Returns a string like '1.0.0'. \"\"\" # Convert to a list of", "= True semversioner_path = semversioner_path_legacy if not os.path.isdir(semversioner_path): os.makedirs(semversioner_path) next_release_path = os.path.join(semversioner_path, 'next-release')", "import datetime from distutils.version import StrictVersion from jinja2 import Template ROOTDIR = os.getcwd()", "= os.path.join(self.semversioner_path, '%s.json' % next_version_number) click.echo(\"Generated '\" + release_json_filename + \"' file.\") with", "data = json.load(f) data = sorted(data, key=lambda k: k['type'] + k['description']) releases.append({'id': release_identifier,", "StrictVersion from jinja2 import Template ROOTDIR = os.getcwd() INITIAL_VERSION = '0.0.0' DEFAULT_TEMPLATE =", "in self._sorted_releases(): with open(os.path.join(self.semversioner_path, release_identifier + '.json')) as f: data = json.load(f) data", "current_version, release_type): \"\"\" Returns a string like '1.0.0'. 
\"\"\" # Convert to a", "class Semversioner: def __init__(self, path=ROOTDIR): semversioner_path_legacy = os.path.join(path, '.changes') semversioner_path_new = os.path.join(path, '.semversioner')", "filename) with open(full_path) as f: changes.append(json.load(f)) if len(changes) == 0: click.secho(\"Error: No changes", "Template ROOTDIR = os.getcwd() INITIAL_VERSION = '0.0.0' DEFAULT_TEMPLATE = \"\"\"# Changelog Note: version", "reverse=True) return releases def _get_next_version_number(self, changes, current_version_number): release_type = sorted(list(map(lambda x: x['type'], changes)))[0]", "file. The method creates a new json file in the ``.semversioner/next-release/`` directory with", "generates the changelog based on the template file defined in ``DEFAULT_TEMPLATE``. Returns -------", "the files, it removes the ``next-release`` directory. Returns ------- previous_version : str Previous", "method creates a new json file in the ``.semversioner/next-release/`` directory with the type", "files from ``next-release``. After aggregating the files, it removes the ``next-release`` directory. Returns", "None or os.path.isfile(os.path.join(self.next_release_path, filename))): filename = '{type_name}-{datetime}.json'.format( type_name=parsed_values['type'], datetime=\"{:%Y%m%d%H%M%S}\".format(datetime.datetime.utcnow())) with open(os.path.join(self.next_release_path, filename), 'w')", "a list of all the individual JSON files from ``next-release``. After aggregating the", "'path': os.path.join(self.next_release_path, filename) } def generate_changelog(self): \"\"\" Generates the changelog. The method generates", "os.path.join(path, '.changes') semversioner_path_new = os.path.join(path, '.semversioner') semversioner_path = semversioner_path_new deprecated = False if", "all the individual JSON files from ``next-release``. 
After aggregating the files, it removes", "filename = None while (filename is None or os.path.isfile(os.path.join(self.next_release_path, filename))): filename = '{type_name}-{datetime}.json'.format(", "or os.path.isfile(os.path.join(self.next_release_path, filename))): filename = '{type_name}-{datetime}.json'.format( type_name=parsed_values['type'], datetime=\"{:%Y%m%d%H%M%S}\".format(datetime.datetime.utcnow())) with open(os.path.join(self.next_release_path, filename), 'w') as", "Changelog string. \"\"\" releases = [] for release_identifier in self._sorted_releases(): with open(os.path.join(self.semversioner_path, release_identifier", "\"' file.\") with open(release_json_filename, 'w') as f: f.write(json.dumps(changes, indent=2, sort_keys=True)) click.echo(\"Removing '\" +", "= os.path.join(path, '.changes') semversioner_path_new = os.path.join(path, '.semversioner') semversioner_path = semversioner_path_new deprecated = False", "endfor %} \"\"\" class Semversioner: def __init__(self, path=ROOTDIR): semversioner_path_legacy = os.path.join(path, '.changes') semversioner_path_new", "os.makedirs(next_release_path) self.path = path self.semversioner_path = semversioner_path self.next_release_path = next_release_path self.deprecated = deprecated", "= sorted(releases, key=StrictVersion, reverse=True) return releases def _get_next_version_number(self, changes, current_version_number): release_type = sorted(list(map(lambda", "Generates the changelog. 
The method generates the changelog based on the template file", "os.makedirs(semversioner_path) next_release_path = os.path.join(semversioner_path, 'next-release') if not os.path.isdir(next_release_path): os.makedirs(next_release_path) self.path = path self.semversioner_path", "release.id }} {% for data in release.data %} - {{ data.type }}: {{", "{% for release in releases %} ## {{ release.id }} {% for data", "deprecated def is_deprecated(self): return self.deprecated def add_change(self, change_type, description): \"\"\" Create a new", "files)) releases = sorted(releases, key=StrictVersion, reverse=True) return releases def _get_next_version_number(self, changes, current_version_number): release_type", "and description provided. Parameters ------- change_type (str): Change type. Allowed values: major, minor,", "1 version_parts[2] = 0 elif release_type == 'major': version_parts[0] += 1 version_parts[1] =", "os import sys import json import click import datetime from distutils.version import StrictVersion", "}} {% endfor %} {% endfor %} \"\"\" class Semversioner: def __init__(self, path=ROOTDIR):", "click.echo(\"Generated '\" + release_json_filename + \"' file.\") with open(release_json_filename, 'w') as f: f.write(json.dumps(changes,", "ROOTDIR = os.getcwd() INITIAL_VERSION = '0.0.0' DEFAULT_TEMPLATE = \"\"\"# Changelog Note: version releases", "changelog. The method generates the changelog based on the template file defined in", "the release by taking everything in ``next-release`` folder and aggregating all together in", "}} {% for data in release.data %} - {{ data.type }}: {{ data.description", "aggregating the files, it removes the ``next-release`` directory. 
Returns ------- previous_version : str", "= next_release_path self.deprecated = deprecated def is_deprecated(self): return self.deprecated def add_change(self, change_type, description):", "self.semversioner_path = semversioner_path self.next_release_path = next_release_path self.deprecated = deprecated def is_deprecated(self): return self.deprecated", "release_identifier + '.json')) as f: data = json.load(f) data = sorted(data, key=lambda k:", "__init__(self, path=ROOTDIR): semversioner_path_legacy = os.path.join(path, '.changes') semversioner_path_new = os.path.join(path, '.semversioner') semversioner_path = semversioner_path_new", "= os.path.join(next_release_dir, filename) with open(full_path) as f: changes.append(json.load(f)) if len(changes) == 0: click.secho(\"Error:", "'next-release') if not os.path.isdir(next_release_path): os.makedirs(next_release_path) self.path = path self.semversioner_path = semversioner_path self.next_release_path =", "semversioner_path = semversioner_path_legacy if not os.path.isdir(semversioner_path): os.makedirs(semversioner_path) next_release_path = os.path.join(semversioner_path, 'next-release') if not", "+ \"' file.\") with open(release_json_filename, 'w') as f: f.write(json.dumps(changes, indent=2, sort_keys=True)) click.echo(\"Removing '\"", "x['type'], changes)))[0] return self._increase_version(current_version_number, release_type) def _increase_version(self, current_version, release_type): \"\"\" Returns a string", "str Previous version. new_version : str New version. \"\"\" changes = [] next_release_dir", "version. \"\"\" releases = self._sorted_releases() if len(releases) > 0: return releases[0] return INITIAL_VERSION", "as f: f.write(json.dumps(parsed_values, indent=2) + \"\\n\") return { 'path': os.path.join(self.next_release_path, filename) } def", "{{ data.description }} {% endfor %} {% endfor %} \"\"\" class Semversioner: def", "No changes to release. 
Skipping release process.\", fg='red') sys.exit(-1) current_version_number = self.get_version() next_version_number", "changelog based on the template file defined in ``DEFAULT_TEMPLATE``. Returns ------- str Changelog", "(str): Change description. Returns ------- path : str Absolute path of the file", "- {{ data.type }}: {{ data.description }} {% endfor %} {% endfor %}", "release_type == 'major': version_parts[0] += 1 version_parts[1] = 0 version_parts[2] = 0 return", "of the file generated. \"\"\" parsed_values = { 'type': change_type, 'description': description, }", "sorted(releases, key=StrictVersion, reverse=True) return releases def _get_next_version_number(self, changes, current_version_number): release_type = sorted(list(map(lambda x:", "\"\"\" class Semversioner: def __init__(self, path=ROOTDIR): semversioner_path_legacy = os.path.join(path, '.changes') semversioner_path_new = os.path.join(path,", "{ 'type': change_type, 'description': description, } filename = None while (filename is None", "return INITIAL_VERSION def _sorted_releases(self): files = [f for f in os.listdir(self.semversioner_path) if os.path.isfile(os.path.join(self.semversioner_path,", "os.path.join(path, '.semversioner') semversioner_path = semversioner_path_new deprecated = False if os.path.isdir(semversioner_path_legacy) and not os.path.isdir(semversioner_path_new):", "description): \"\"\" Create a new changeset file. The method creates a new json", "a list of ints: [1, 0, 0]. version_parts = list(int(i) for i in", "introduce breaking changes. {% for release in releases %} ## {{ release.id }}", "_get_next_version_number(self, changes, current_version_number): release_type = sorted(list(map(lambda x: x['type'], changes)))[0] return self._increase_version(current_version_number, release_type) def", "self._increase_version(current_version_number, release_type) def _increase_version(self, current_version, release_type): \"\"\" Returns a string like '1.0.0'. 
\"\"\"", "semversioner_path self.next_release_path = next_release_path self.deprecated = deprecated def is_deprecated(self): return self.deprecated def add_change(self,", "endfor %} {% endfor %} \"\"\" class Semversioner: def __init__(self, path=ROOTDIR): semversioner_path_legacy =", "the changelog based on the template file defined in ``DEFAULT_TEMPLATE``. Returns ------- str", "return { 'previous_version': current_version_number, 'new_version': next_version_number } def get_version(self): \"\"\" Gets the current", "elif release_type == 'major': version_parts[0] += 1 version_parts[1] = 0 version_parts[2] = 0", "0, 0]. version_parts = list(int(i) for i in current_version.split('.')) if release_type == 'patch':", "version_parts[1] += 1 version_parts[2] = 0 elif release_type == 'major': version_parts[0] += 1", "releases %} ## {{ release.id }} {% for data in release.data %} -", "[f for f in os.listdir(self.semversioner_path) if os.path.isfile(os.path.join(self.semversioner_path, f))] releases = list(map(lambda x: x[:-len('.json')],", "data = sorted(data, key=lambda k: k['type'] + k['description']) releases.append({'id': release_identifier, 'data': data}) return", "release_type = sorted(list(map(lambda x: x['type'], changes)))[0] return self._increase_version(current_version_number, release_type) def _increase_version(self, current_version, release_type):", "= [f for f in os.listdir(self.semversioner_path) if os.path.isfile(os.path.join(self.semversioner_path, f))] releases = list(map(lambda x:", "list of ints: [1, 0, 0]. version_parts = list(int(i) for i in current_version.split('.'))", "return self.deprecated def add_change(self, change_type, description): \"\"\" Create a new changeset file. The", "# Convert to a list of ints: [1, 0, 0]. version_parts = list(int(i)", "= { 'type': change_type, 'description': description, } filename = None while (filename is", "previous_version : str Previous version. new_version : str New version. 
\"\"\" changes =", "release process.\", fg='red') sys.exit(-1) current_version_number = self.get_version() next_version_number = self._get_next_version_number(changes, current_version_number) click.echo(\"Releasing version:", "[] for release_identifier in self._sorted_releases(): with open(os.path.join(self.semversioner_path, release_identifier + '.json')) as f: data", "= self._sorted_releases() if len(releases) > 0: return releases[0] return INITIAL_VERSION def _sorted_releases(self): files", "full_path = os.path.join(next_release_dir, filename) os.remove(full_path) os.rmdir(next_release_dir) return { 'previous_version': current_version_number, 'new_version': next_version_number }", "0]. version_parts = list(int(i) for i in current_version.split('.')) if release_type == 'patch': version_parts[2]", "list(int(i) for i in current_version.split('.')) if release_type == 'patch': version_parts[2] += 1 elif", "'minor': version_parts[1] += 1 version_parts[2] = 0 elif release_type == 'major': version_parts[0] +=", "aggregating all together in a single JSON file for that release (e.g ``1.12.0.json``).", "next_release_dir = self.next_release_path for filename in os.listdir(next_release_dir): full_path = os.path.join(next_release_dir, filename) with open(full_path)", "= os.getcwd() INITIAL_VERSION = '0.0.0' DEFAULT_TEMPLATE = \"\"\"# Changelog Note: version releases in", "\"\"\" releases = [] for release_identifier in self._sorted_releases(): with open(os.path.join(self.semversioner_path, release_identifier + '.json'))", "file.\") with open(release_json_filename, 'w') as f: f.write(json.dumps(changes, indent=2, sort_keys=True)) click.echo(\"Removing '\" + next_release_dir", "releases = list(map(lambda x: x[:-len('.json')], files)) releases = sorted(releases, key=StrictVersion, reverse=True) return releases", "defined in ``DEFAULT_TEMPLATE``. Returns ------- str Changelog string. 
\"\"\" releases = [] for", "= semversioner_path self.next_release_path = next_release_path self.deprecated = deprecated def is_deprecated(self): return self.deprecated def", "INITIAL_VERSION = '0.0.0' DEFAULT_TEMPLATE = \"\"\"# Changelog Note: version releases in the 0.x.y", "changes.append(json.load(f)) if len(changes) == 0: click.secho(\"Error: No changes to release. Skipping release process.\",", "description (str): Change description. Returns ------- path : str Absolute path of the", "current_version_number): release_type = sorted(list(map(lambda x: x['type'], changes)))[0] return self._increase_version(current_version_number, release_type) def _increase_version(self, current_version,", "= self.get_version() next_version_number = self._get_next_version_number(changes, current_version_number) click.echo(\"Releasing version: %s -> %s\" % (current_version_number,", "[] next_release_dir = self.next_release_path for filename in os.listdir(next_release_dir): full_path = os.path.join(next_release_dir, filename) with" ]
[ "\".\" # extension = name.split('.')[-1] # matching only extension after last \".\" except:", "as f: f.write(bytes(0)) writable = True except Exception as e: print('write test failed:", "# in case its a number and not None if name_in and name_in", "extension = name.split('.')[-1] # matching only extension after last \".\" except: extension =", "after last \".\" except: extension = None if extension: name_path = Path(f'{clean_name}.{extension}') #", "name = re.split(r'[?]',url_path.name)[0] # if '?' in url, get rid of it extension", "name_out = self._make_name(url_path, name_out) if not d_path: # download_path = self.src_path.parent download_path =", "name = re.split(r'[?]',url_path.name)[0] # if '?' in url, get rid of it return", "else: size = 1 with open(save_file, 'wb') as fd: tmp = 0 print(f'Downloding:", "'█' if progress > 1: progress = 1 blocks = int(progress / (1/FULL_BLOCKLENGTH))", "len(name_parts) > 1: name_noext = '.'.join(name_parts[:-1]) # joining together without extension else: name_noext", "there if save_file.exists(): print('skipping', save_file.name) return r = self.session.get(url) # size = float(r.headers['content-length'])", "if __name__ == \"__main__\": # d_path = input_loop() #let user decide where to", "name, taking name from url') name = re.split(r'[?]',url_path.name)[0] # if '?' 
in url,", "d_path: # download_path = self.src_path.parent download_path = self.cwd else: download_path = Path(d_path) #", "0 print(f'Downloding: {save_file.name}') print(f'to {save_file.absolute()}') for chunk in r.iter_content(chunk_size=1024): if chunk: fd.write(chunk) tmp", "in r.iter_content(chunk_size=1024): if chunk: fd.write(chunk) tmp += 1024 if printprogess: self._print_progress(tmp, size) print('')", "test write failed: ',e) pass return writable if __name__ == \"__main__\": # d_path", "= name_loop() # let user decide what name it will have d =", "after last \".\" # extension = name.split('.')[-1] # matching only extension after last", "1\\n Returns the bar with current progress as a string \"\"\" FULL_BLOCKLENGTH =", "url `d_path`: Default download path is current working directory. `name_out`: Default name is", "fillblock*blocks bar_end = (33 - len(bar_start))*'_'+'|' bar_percent = f' {progress*100:0.2f} % ' text", "return inp #try: # d_path = Path(inp) #except Exception as e: # print('invalid", "size = 1 with open(save_file, 'wb') as fd: tmp = 0 print(f'Downloding: {save_file.name}')", "specified. `printprogress`: Prints current download progress in terminal. \"\"\" url_path = Path(url) #download_path", "r = self.session.get(url) # size = float(r.headers['content-length']) contentlength = r.headers.get('content-length') if contentlength is", "= fillblock*blocks bar_end = (33 - len(bar_start))*'_'+'|' bar_percent = f' {progress*100:0.2f} % '", "import re import os class Downloader: \"\"\" class to manage downloading url links", "be between 0 and 1\\n Returns the bar with current progress as a", "#except Exception as e: # print('invalid path, try again\\n') # continue #if d_path.exists():", "if not specified. `printprogress`: Prints current download progress in terminal. 
\"\"\" url_path =", "from sys import stdout from pathlib import Path import re import os class", "except : print('invalid name, taking name from url') name = re.split(r'[?]',url_path.name)[0] # if", "or without extension, takes extension from url if not specified. `printprogress`: Prints current", "url, get rid of it extension = re.search(r'(?<=[.])\\w+$', name).group() # matching only extension", "# continue #if d_path.exists(): return d_path def name_loop(): while True: inp = input('Name:\\n')", "Default download path is current working directory. `name_out`: Default name is the tail", "_test_write(inp): return inp #try: # d_path = Path(inp) #except Exception as e: #", "\"\"\" # in case its a number and not None if name_in and", "extension if len(name_parts) > 1: name_noext = '.'.join(name_parts[:-1]) # joining together without extension", "#if d_path.exists(): return d_path def name_loop(): while True: inp = input('Name:\\n') return inp", "writes a file to the path and returns True if it succeded '''", "its empty it raises exception # clean_name = re.search(r'\\w+',name_in).group() # parsing name, only", "path:\\n') if _test_write(inp): return inp #try: # d_path = Path(inp) #except Exception as", "a string \"\"\" FULL_BLOCKLENGTH = 32 fillblock = '█' if progress > 1:", "specified and not in the name else: name = re.split(r'[?]',url_path.name)[0] # if '?'", "> 1: name_noext = '.'.join(name_parts[:-1]) # joining together without extension else: name_noext =", "progress): \"\"\" progress must be between 0 and 1\\n Returns the bar with", "joining together without extension else: name_noext = name_parts[0] clean_name = ' '.join(re.findall(r'\\w+.+',name_noext)) #", "= re.split(r'[.].+$',name_in)[0] # name without extension name_parts = name_in.split('.') # name without extension", "directory. 
`name_out`: Default name is the tail of the url address, can take", "self.cwd = Path.cwd() self.src_path = Path(__file__) if not session: self.session = rq.Session() else:", "is current working directory. `name_out`: Default name is the tail of the url", "# making file path save_file = download_path / name_out # checking if file", "extension after last \".\" name_path = Path(f'{clean_name}.{extension}') # extension from url return name_path.name", "= 1 blocks = int(progress / (1/FULL_BLOCKLENGTH)) bar_start = fillblock*blocks bar_end = (33", "if extension: name_path = Path(f'{clean_name}.{extension}') # custom extension specified and not in the", "1: progress = 1 blocks = int(progress / (1/FULL_BLOCKLENGTH)) bar_start = fillblock*blocks bar_end", "can take in a name with or without extension, takes extension from url", "without extension, takes extension from url if not specified. `printprogress`: Prints current download", "print('Done') def input_loop(): while True: inp = input('Download path:\\n') if _test_write(inp): return inp", "if not session: self.session = rq.Session() else: self.session = session def _print_progress(self, current_bytes,", "failed: ',e) pass return writable if __name__ == \"__main__\": # d_path = input_loop()", "without extension else: name_noext = name_parts[0] clean_name = ' '.join(re.findall(r'\\w+.+',name_noext)) # parsing name,", "= self.cwd / url_path.name if not d_path else Path(d_path) name_out = self._make_name(url_path, name_out)", "download d_path = Path('/home/bruno/Desktop') name = name_loop() # let user decide what name", "name try: extension = re.search(r'(?<=[.])\\w+$', name_in).group() # matching only extension after last \".\"", "session=None): # creates a session self.cwd = Path.cwd() self.src_path = Path(__file__) if not", "os.remove(test_file) except Exception as e: #print('deleting test write failed: ',e) pass return writable", "else: name_noext = name_parts[0] clean_name = ' '.join(re.findall(r'\\w+.+',name_noext)) # 
parsing name, only alphanumeric,", "class Downloader: \"\"\" class to manage downloading url links \"\"\" def __init__(self, *args,", "Path(url) #download_path = self.cwd / url_path.name if not d_path else Path(d_path) name_out =", "' text = bar_start+bar_end+bar_percent return text def _make_name(self, url_path: Path, name_in: str): \"\"\"", "last \".\" name_path = Path(f'{clean_name}.{extension}') # extension from url return name_path.name def download(self,", "\".\" name_path = Path(f'{clean_name}.{extension}') # extension from url return name_path.name def download(self, url,", "# extension = name.split('.')[-1] # matching only extension after last \".\" except: extension", "bar_start = fillblock*blocks bar_end = (33 - len(bar_start))*'_'+'|' bar_percent = f' {progress*100:0.2f} %", "creates a session self.cwd = Path.cwd() self.src_path = Path(__file__) if not session: self.session", "type(str): name_in = str(name_in) try: name_in[0] # if its empty it raises exception", "def download(self, url, d_path=None, name_out=None, printprogess=False): \"\"\" Downloads from url `d_path`: Default download", "empty testing except : print('invalid name, taking name from url') name = re.split(r'[?]',url_path.name)[0]", "while True: inp = input('Name:\\n') return inp def _test_write(path): ''' writes a file", "writable if __name__ == \"__main__\": # d_path = input_loop() #let user decide where", "name_in = str(name_in) try: name_in[0] # if its empty it raises exception #", "Path.cwd() self.src_path = Path(__file__) if not session: self.session = rq.Session() else: self.session =", "only extension after last \".\" name_path = Path(f'{clean_name}.{extension}') # extension from url return", "#let user decide where to download d_path = Path('/home/bruno/Desktop') name = name_loop() #", "re.split(r'[.].+$',name_in)[0] # name without extension name_parts = name_in.split('.') # name without extension if", "> 1: progress = 1 blocks = int(progress / (1/FULL_BLOCKLENGTH)) bar_start = 
fillblock*blocks", "test failed: ',e) return finally: try: os.remove(test_file) except Exception as e: #print('deleting test", "links \"\"\" def __init__(self, *args, session=None): # creates a session self.cwd = Path.cwd()", "progress in terminal. \"\"\" url_path = Path(url) #download_path = self.cwd / url_path.name if", "from pathlib import Path import re import os class Downloader: \"\"\" class to", "#print('deleting test write failed: ',e) pass return writable if __name__ == \"__main__\": #", "d_path def name_loop(): while True: inp = input('Name:\\n') return inp def _test_write(path): '''", "as e: print('write test failed: ',e) return finally: try: os.remove(test_file) except Exception as", "{save_file.absolute()}') for chunk in r.iter_content(chunk_size=1024): if chunk: fd.write(chunk) tmp += 1024 if printprogess:", "- len(bar_start))*'_'+'|' bar_percent = f' {progress*100:0.2f} % ' text = bar_start+bar_end+bar_percent return text", "in url, get rid of it return name try: extension = re.search(r'(?<=[.])\\w+$', name_in).group()", "None if name_in and name_in != type(str): name_in = str(name_in) try: name_in[0] #", "writable = False try: p = Path(path) test_file = p / 'testfile.testfile' with", "is the tail of the url address, can take in a name with", "rid of it extension = re.search(r'(?<=[.])\\w+$', name).group() # matching only extension after last", "(33 - len(bar_start))*'_'+'|' bar_percent = f' {progress*100:0.2f} % ' text = bar_start+bar_end+bar_percent return", "Path(inp) #except Exception as e: # print('invalid path, try again\\n') # continue #if", "str): \"\"\" Parses the name and returns a writebale name \"\"\" # in", "''' writes a file to the path and returns True if it succeded", "size): bar = self._get_bar(current_bytes / size) output = f'\\r{bar} {current_bytes/1000000:0.2f} / {size/1000000:0.2f} MB'", "name, only alphanumeric, no whitespace # name = re.split(r'[.].+$',name_in)[0] # name without extension", "return name try: extension = 
re.search(r'(?<=[.])\\w+$', name_in).group() # matching only extension after last", "# extension from url return name_path.name def download(self, url, d_path=None, name_out=None, printprogess=False): \"\"\"", "url, d_path=None, name_out=None, printprogess=False): \"\"\" Downloads from url `d_path`: Default download path is", "if printprogess: self._print_progress(tmp, size) print('') print('Done') def input_loop(): while True: inp = input('Download", "failed: ',e) return finally: try: os.remove(test_file) except Exception as e: #print('deleting test write", "download_path = self.cwd else: download_path = Path(d_path) # os.chdir(download_path) # making file path", "what name it will have d = Downloader() test_image_url = 'https://images.pexels.com/photos/459793/pexels-photo-459793.jpeg?auto=compress&cs=tinysrgb&dpr=2&h=750&w=1260' d.download(test_image_url, d_path,", "# parsing name, only alphanumeric, no whitespace clean_name[0] # empty testing except :", "True: inp = input('Name:\\n') return inp def _test_write(path): ''' writes a file to", "p = Path(path) test_file = p / 'testfile.testfile' with open(test_file, 'wb') as f:", "file already is there if save_file.exists(): print('skipping', save_file.name) return r = self.session.get(url) #", "float(r.headers['content-length']) contentlength = r.headers.get('content-length') if contentlength is not None: size = float(contentlength) else:", "fillblock = '█' if progress > 1: progress = 1 blocks = int(progress", "from url return name_path.name def download(self, url, d_path=None, name_out=None, printprogess=False): \"\"\" Downloads from", "/ url_path.name if not d_path else Path(d_path) name_out = self._make_name(url_path, name_out) if not", "the tail of the url address, can take in a name with or", "url if not specified. `printprogress`: Prints current download progress in terminal. 
\"\"\" url_path", "False try: p = Path(path) test_file = p / 'testfile.testfile' with open(test_file, 'wb')", "extension, takes extension from url if not specified. `printprogress`: Prints current download progress", "if '?' in url, get rid of it extension = re.search(r'(?<=[.])\\w+$', name).group() #", "= '.'.join(name_parts[:-1]) # joining together without extension else: name_noext = name_parts[0] clean_name =", "def _get_bar(self, progress): \"\"\" progress must be between 0 and 1\\n Returns the", "name.split('.')[-1] # matching only extension after last \".\" except: extension = None if", "string \"\"\" FULL_BLOCKLENGTH = 32 fillblock = '█' if progress > 1: progress", "= True except Exception as e: print('write test failed: ',e) return finally: try:", "last \".\" # extension = name.split('.')[-1] # matching only extension after last \".\"", "as e: #print('deleting test write failed: ',e) pass return writable if __name__ ==", "name_noext = name_parts[0] clean_name = ' '.join(re.findall(r'\\w+.+',name_noext)) # parsing name, only alphanumeric, no", "e: # print('invalid path, try again\\n') # continue #if d_path.exists(): return d_path def", "inp #try: # d_path = Path(inp) #except Exception as e: # print('invalid path,", "fd.write(chunk) tmp += 1024 if printprogess: self._print_progress(tmp, size) print('') print('Done') def input_loop(): while", "= session def _print_progress(self, current_bytes, size): bar = self._get_bar(current_bytes / size) output =", "',e) pass return writable if __name__ == \"__main__\": # d_path = input_loop() #let", "self.session = session def _print_progress(self, current_bytes, size): bar = self._get_bar(current_bytes / size) output", "Downloader: \"\"\" class to manage downloading url links \"\"\" def __init__(self, *args, session=None):", "= 1 with open(save_file, 'wb') as fd: tmp = 0 print(f'Downloding: {save_file.name}') print(f'to", "current_bytes, size): bar = self._get_bar(current_bytes / size) output = f'\\r{bar} 
{current_bytes/1000000:0.2f} / {size/1000000:0.2f}", "with current progress as a string \"\"\" FULL_BLOCKLENGTH = 32 fillblock = '█'", "/ {size/1000000:0.2f} MB' stdout.write(output) # stdout.flush() def _get_bar(self, progress): \"\"\" progress must be", "name_in[0] # if its empty it raises exception # clean_name = re.search(r'\\w+',name_in).group() #", "succeded ''' writable = False try: p = Path(path) test_file = p /", "extension from url if not specified. `printprogress`: Prints current download progress in terminal.", "name_out # checking if file already is there if save_file.exists(): print('skipping', save_file.name) return", "chunk: fd.write(chunk) tmp += 1024 if printprogess: self._print_progress(tmp, size) print('') print('Done') def input_loop():", "continue #if d_path.exists(): return d_path def name_loop(): while True: inp = input('Name:\\n') return", "% ' text = bar_start+bar_end+bar_percent return text def _make_name(self, url_path: Path, name_in: str):", "together without extension else: name_noext = name_parts[0] clean_name = ' '.join(re.findall(r'\\w+.+',name_noext)) # parsing", "self._print_progress(tmp, size) print('') print('Done') def input_loop(): while True: inp = input('Download path:\\n') if", "self.session.get(url) # size = float(r.headers['content-length']) contentlength = r.headers.get('content-length') if contentlength is not None:", "name_loop(): while True: inp = input('Name:\\n') return inp def _test_write(path): ''' writes a", "and not None if name_in and name_in != type(str): name_in = str(name_in) try:", "0 and 1\\n Returns the bar with current progress as a string \"\"\"", "takes extension from url if not specified. 
`printprogress`: Prints current download progress in", "self.src_path = Path(__file__) if not session: self.session = rq.Session() else: self.session = session", "finally: try: os.remove(test_file) except Exception as e: #print('deleting test write failed: ',e) pass", "session self.cwd = Path.cwd() self.src_path = Path(__file__) if not session: self.session = rq.Session()", "number and not None if name_in and name_in != type(str): name_in = str(name_in)", "f: f.write(bytes(0)) writable = True except Exception as e: print('write test failed: ',e)", "rq.Session() else: self.session = session def _print_progress(self, current_bytes, size): bar = self._get_bar(current_bytes /", "if save_file.exists(): print('skipping', save_file.name) return r = self.session.get(url) # size = float(r.headers['content-length']) contentlength", "stdout from pathlib import Path import re import os class Downloader: \"\"\" class", "open(test_file, 'wb') as f: f.write(bytes(0)) writable = True except Exception as e: print('write", "# if its empty it raises exception # clean_name = re.search(r'\\w+',name_in).group() # parsing", "/ (1/FULL_BLOCKLENGTH)) bar_start = fillblock*blocks bar_end = (33 - len(bar_start))*'_'+'|' bar_percent = f'", "if not d_path: # download_path = self.src_path.parent download_path = self.cwd else: download_path =", "{size/1000000:0.2f} MB' stdout.write(output) # stdout.flush() def _get_bar(self, progress): \"\"\" progress must be between", "if progress > 1: progress = 1 blocks = int(progress / (1/FULL_BLOCKLENGTH)) bar_start", "return r = self.session.get(url) # size = float(r.headers['content-length']) contentlength = r.headers.get('content-length') if contentlength", "requests as rq from sys import stdout from pathlib import Path import re", "= re.search(r'\\w+',name_in).group() # parsing name, only alphanumeric, no whitespace # name = re.split(r'[.].+$',name_in)[0]", "= self.cwd else: download_path = Path(d_path) # os.chdir(download_path) # making file path 
save_file", "stdout.flush() def _get_bar(self, progress): \"\"\" progress must be between 0 and 1\\n Returns", "between 0 and 1\\n Returns the bar with current progress as a string", "of it return name try: extension = re.search(r'(?<=[.])\\w+$', name_in).group() # matching only extension", "*args, session=None): # creates a session self.cwd = Path.cwd() self.src_path = Path(__file__) if", "path, try again\\n') # continue #if d_path.exists(): return d_path def name_loop(): while True:", "not specified. `printprogress`: Prints current download progress in terminal. \"\"\" url_path = Path(url)", "= input('Download path:\\n') if _test_write(inp): return inp #try: # d_path = Path(inp) #except", "import requests as rq from sys import stdout from pathlib import Path import", "= self._make_name(url_path, name_out) if not d_path: # download_path = self.src_path.parent download_path = self.cwd", "Path(__file__) if not session: self.session = rq.Session() else: self.session = session def _print_progress(self,", "if file already is there if save_file.exists(): print('skipping', save_file.name) return r = self.session.get(url)", "and name_in != type(str): name_in = str(name_in) try: name_in[0] # if its empty", "name and returns a writebale name \"\"\" # in case its a number", "decide what name it will have d = Downloader() test_image_url = 'https://images.pexels.com/photos/459793/pexels-photo-459793.jpeg?auto=compress&cs=tinysrgb&dpr=2&h=750&w=1260' d.download(test_image_url,", "Path, name_in: str): \"\"\" Parses the name and returns a writebale name \"\"\"", "is there if save_file.exists(): print('skipping', save_file.name) return r = self.session.get(url) # size =", "= False try: p = Path(path) test_file = p / 'testfile.testfile' with open(test_file,", "save_file.exists(): print('skipping', save_file.name) return r = self.session.get(url) # size = float(r.headers['content-length']) contentlength =", "# let user decide what name it will have d = Downloader() test_image_url", 
"if '?' in url, get rid of it return name try: extension =", "name_in: str): \"\"\" Parses the name and returns a writebale name \"\"\" #", "exception # clean_name = re.search(r'\\w+',name_in).group() # parsing name, only alphanumeric, no whitespace #", "try: name_in[0] # if its empty it raises exception # clean_name = re.search(r'\\w+',name_in).group()", "MB' stdout.write(output) # stdout.flush() def _get_bar(self, progress): \"\"\" progress must be between 0", "extension else: name_noext = name_parts[0] clean_name = ' '.join(re.findall(r'\\w+.+',name_noext)) # parsing name, only", "else Path(d_path) name_out = self._make_name(url_path, name_out) if not d_path: # download_path = self.src_path.parent", "= rq.Session() else: self.session = session def _print_progress(self, current_bytes, size): bar = self._get_bar(current_bytes", "the path and returns True if it succeded ''' writable = False try:", "without extension name_parts = name_in.split('.') # name without extension if len(name_parts) > 1:", "!= type(str): name_in = str(name_in) try: name_in[0] # if its empty it raises", "again\\n') # continue #if d_path.exists(): return d_path def name_loop(): while True: inp =", "= re.search(r'(?<=[.])\\w+$', name).group() # matching only extension after last \".\" name_path = Path(f'{clean_name}.{extension}')", "= name_parts[0] clean_name = ' '.join(re.findall(r'\\w+.+',name_noext)) # parsing name, only alphanumeric, no whitespace", "returns a writebale name \"\"\" # in case its a number and not", "re import os class Downloader: \"\"\" class to manage downloading url links \"\"\"", "a writebale name \"\"\" # in case its a number and not None", "Path import re import os class Downloader: \"\"\" class to manage downloading url", "and not in the name else: name = re.split(r'[?]',url_path.name)[0] # if '?' 
in", "as e: # print('invalid path, try again\\n') # continue #if d_path.exists(): return d_path", "url_path = Path(url) #download_path = self.cwd / url_path.name if not d_path else Path(d_path)", "bar with current progress as a string \"\"\" FULL_BLOCKLENGTH = 32 fillblock =", "it return name try: extension = re.search(r'(?<=[.])\\w+$', name_in).group() # matching only extension after", "to download d_path = Path('/home/bruno/Desktop') name = name_loop() # let user decide what", "if name_in and name_in != type(str): name_in = str(name_in) try: name_in[0] # if", "file path save_file = download_path / name_out # checking if file already is", "# os.chdir(download_path) # making file path save_file = download_path / name_out # checking", "print(f'to {save_file.absolute()}') for chunk in r.iter_content(chunk_size=1024): if chunk: fd.write(chunk) tmp += 1024 if", "last \".\" except: extension = None if extension: name_path = Path(f'{clean_name}.{extension}') # custom", "# name without extension if len(name_parts) > 1: name_noext = '.'.join(name_parts[:-1]) # joining", "already is there if save_file.exists(): print('skipping', save_file.name) return r = self.session.get(url) # size", "\"\"\" Downloads from url `d_path`: Default download path is current working directory. `name_out`:", "url address, can take in a name with or without extension, takes extension", "except Exception as e: #print('deleting test write failed: ',e) pass return writable if", "f' {progress*100:0.2f} % ' text = bar_start+bar_end+bar_percent return text def _make_name(self, url_path: Path,", "= (33 - len(bar_start))*'_'+'|' bar_percent = f' {progress*100:0.2f} % ' text = bar_start+bar_end+bar_percent", "if contentlength is not None: size = float(contentlength) else: size = 1 with", "= re.split(r'[?]',url_path.name)[0] # if '?' in url, get rid of it extension =", "rq from sys import stdout from pathlib import Path import re import os", "'?' 
in url, get rid of it return name try: extension = re.search(r'(?<=[.])\\w+$',", "download_path = Path(d_path) # os.chdir(download_path) # making file path save_file = download_path /", "name from url') name = re.split(r'[?]',url_path.name)[0] # if '?' in url, get rid", "= r.headers.get('content-length') if contentlength is not None: size = float(contentlength) else: size =", "'?' in url, get rid of it extension = re.search(r'(?<=[.])\\w+$', name).group() # matching", "Path(path) test_file = p / 'testfile.testfile' with open(test_file, 'wb') as f: f.write(bytes(0)) writable", "and 1\\n Returns the bar with current progress as a string \"\"\" FULL_BLOCKLENGTH", "take in a name with or without extension, takes extension from url if", "= p / 'testfile.testfile' with open(test_file, 'wb') as f: f.write(bytes(0)) writable = True", "re.search(r'(?<=[.])\\w+$', name_in).group() # matching only extension after last \".\" # extension = name.split('.')[-1]", "a session self.cwd = Path.cwd() self.src_path = Path(__file__) if not session: self.session =", "'.join(re.findall(r'\\w+.+',name_noext)) # parsing name, only alphanumeric, no whitespace clean_name[0] # empty testing except", "pathlib import Path import re import os class Downloader: \"\"\" class to manage", "matching only extension after last \".\" name_path = Path(f'{clean_name}.{extension}') # extension from url", "sys import stdout from pathlib import Path import re import os class Downloader:", "None if extension: name_path = Path(f'{clean_name}.{extension}') # custom extension specified and not in", "#download_path = self.cwd / url_path.name if not d_path else Path(d_path) name_out = self._make_name(url_path,", "except Exception as e: print('write test failed: ',e) return finally: try: os.remove(test_file) except", "= 0 print(f'Downloding: {save_file.name}') print(f'to {save_file.absolute()}') for chunk in r.iter_content(chunk_size=1024): if chunk: fd.write(chunk)", "current progress as a string \"\"\" 
FULL_BLOCKLENGTH = 32 fillblock = '█' if", "extension name_parts = name_in.split('.') # name without extension if len(name_parts) > 1: name_noext", "it extension = re.search(r'(?<=[.])\\w+$', name).group() # matching only extension after last \".\" name_path", "d_path=None, name_out=None, printprogess=False): \"\"\" Downloads from url `d_path`: Default download path is current", "contentlength is not None: size = float(contentlength) else: size = 1 with open(save_file,", "no whitespace # name = re.split(r'[.].+$',name_in)[0] # name without extension name_parts = name_in.split('.')", "# matching only extension after last \".\" name_path = Path(f'{clean_name}.{extension}') # extension from", "size) print('') print('Done') def input_loop(): while True: inp = input('Download path:\\n') if _test_write(inp):", "name = name_loop() # let user decide what name it will have d", "to the path and returns True if it succeded ''' writable = False", "= Path(f'{clean_name}.{extension}') # extension from url return name_path.name def download(self, url, d_path=None, name_out=None,", "current working directory. 
`name_out`: Default name is the tail of the url address,", "return inp def _test_write(path): ''' writes a file to the path and returns", "a file to the path and returns True if it succeded ''' writable", "try: os.remove(test_file) except Exception as e: #print('deleting test write failed: ',e) pass return", "with open(save_file, 'wb') as fd: tmp = 0 print(f'Downloding: {save_file.name}') print(f'to {save_file.absolute()}') for", "Path(d_path) name_out = self._make_name(url_path, name_out) if not d_path: # download_path = self.src_path.parent download_path", "it will have d = Downloader() test_image_url = 'https://images.pexels.com/photos/459793/pexels-photo-459793.jpeg?auto=compress&cs=tinysrgb&dpr=2&h=750&w=1260' d.download(test_image_url, d_path, name, printprogess=False)", "user decide where to download d_path = Path('/home/bruno/Desktop') name = name_loop() # let", "_test_write(path): ''' writes a file to the path and returns True if it", "re.search(r'\\w+',name_in).group() # parsing name, only alphanumeric, no whitespace # name = re.split(r'[.].+$',name_in)[0] #", "extension after last \".\" # extension = name.split('.')[-1] # matching only extension after", "making file path save_file = download_path / name_out # checking if file already", "no whitespace clean_name[0] # empty testing except : print('invalid name, taking name from", "from url') name = re.split(r'[?]',url_path.name)[0] # if '?' 
in url, get rid of", "name_parts = name_in.split('.') # name without extension if len(name_parts) > 1: name_noext =", "d_path.exists(): return d_path def name_loop(): while True: inp = input('Name:\\n') return inp def", "raises exception # clean_name = re.search(r'\\w+',name_in).group() # parsing name, only alphanumeric, no whitespace", "extension = None if extension: name_path = Path(f'{clean_name}.{extension}') # custom extension specified and", "matching only extension after last \".\" # extension = name.split('.')[-1] # matching only", "str(name_in) try: name_in[0] # if its empty it raises exception # clean_name =", "(1/FULL_BLOCKLENGTH)) bar_start = fillblock*blocks bar_end = (33 - len(bar_start))*'_'+'|' bar_percent = f' {progress*100:0.2f}", "def _test_write(path): ''' writes a file to the path and returns True if", "= re.split(r'[?]',url_path.name)[0] # if '?' in url, get rid of it return name", "except: extension = None if extension: name_path = Path(f'{clean_name}.{extension}') # custom extension specified", "of the url address, can take in a name with or without extension,", "address, can take in a name with or without extension, takes extension from", "f'\\r{bar} {current_bytes/1000000:0.2f} / {size/1000000:0.2f} MB' stdout.write(output) # stdout.flush() def _get_bar(self, progress): \"\"\" progress", "name without extension if len(name_parts) > 1: name_noext = '.'.join(name_parts[:-1]) # joining together", "\"\"\" Parses the name and returns a writebale name \"\"\" # in case", "name, only alphanumeric, no whitespace clean_name[0] # empty testing except : print('invalid name,", "`name_out`: Default name is the tail of the url address, can take in", "_make_name(self, url_path: Path, name_in: str): \"\"\" Parses the name and returns a writebale", "= Path(url) #download_path = self.cwd / url_path.name if not d_path else Path(d_path) name_out", "return text def _make_name(self, url_path: Path, name_in: str): \"\"\" Parses the name and", "def input_loop(): 
while True: inp = input('Download path:\\n') if _test_write(inp): return inp #try:", "= bar_start+bar_end+bar_percent return text def _make_name(self, url_path: Path, name_in: str): \"\"\" Parses the", "print('') print('Done') def input_loop(): while True: inp = input('Download path:\\n') if _test_write(inp): return", "from url if not specified. `printprogress`: Prints current download progress in terminal. \"\"\"", "/ name_out # checking if file already is there if save_file.exists(): print('skipping', save_file.name)", "import Path import re import os class Downloader: \"\"\" class to manage downloading", "if chunk: fd.write(chunk) tmp += 1024 if printprogess: self._print_progress(tmp, size) print('') print('Done') def", "progress must be between 0 and 1\\n Returns the bar with current progress", "contentlength = r.headers.get('content-length') if contentlength is not None: size = float(contentlength) else: size", "os.chdir(download_path) # making file path save_file = download_path / name_out # checking if", "progress = 1 blocks = int(progress / (1/FULL_BLOCKLENGTH)) bar_start = fillblock*blocks bar_end =", "in the name else: name = re.split(r'[?]',url_path.name)[0] # if '?' 
in url, get", "text = bar_start+bar_end+bar_percent return text def _make_name(self, url_path: Path, name_in: str): \"\"\" Parses", "to manage downloading url links \"\"\" def __init__(self, *args, session=None): # creates a", "tail of the url address, can take in a name with or without", "= Path(path) test_file = p / 'testfile.testfile' with open(test_file, 'wb') as f: f.write(bytes(0))", "{progress*100:0.2f} % ' text = bar_start+bar_end+bar_percent return text def _make_name(self, url_path: Path, name_in:", "output = f'\\r{bar} {current_bytes/1000000:0.2f} / {size/1000000:0.2f} MB' stdout.write(output) # stdout.flush() def _get_bar(self, progress):", "'wb') as f: f.write(bytes(0)) writable = True except Exception as e: print('write test", "pass return writable if __name__ == \"__main__\": # d_path = input_loop() #let user", "whitespace # name = re.split(r'[.].+$',name_in)[0] # name without extension name_parts = name_in.split('.') #", "path is current working directory. `name_out`: Default name is the tail of the", "not d_path: # download_path = self.src_path.parent download_path = self.cwd else: download_path = Path(d_path)", "return writable if __name__ == \"__main__\": # d_path = input_loop() #let user decide", "Default name is the tail of the url address, can take in a", "for chunk in r.iter_content(chunk_size=1024): if chunk: fd.write(chunk) tmp += 1024 if printprogess: self._print_progress(tmp,", "= re.search(r'(?<=[.])\\w+$', name_in).group() # matching only extension after last \".\" # extension =", "in terminal. \"\"\" url_path = Path(url) #download_path = self.cwd / url_path.name if not", "= float(contentlength) else: size = 1 with open(save_file, 'wb') as fd: tmp =", "print('invalid name, taking name from url') name = re.split(r'[?]',url_path.name)[0] # if '?' 
in", "it succeded ''' writable = False try: p = Path(path) test_file = p", "# clean_name = re.search(r'\\w+',name_in).group() # parsing name, only alphanumeric, no whitespace # name", "True if it succeded ''' writable = False try: p = Path(path) test_file", "re.search(r'(?<=[.])\\w+$', name).group() # matching only extension after last \".\" name_path = Path(f'{clean_name}.{extension}') #", "manage downloading url links \"\"\" def __init__(self, *args, session=None): # creates a session", "Prints current download progress in terminal. \"\"\" url_path = Path(url) #download_path = self.cwd", "terminal. \"\"\" url_path = Path(url) #download_path = self.cwd / url_path.name if not d_path", "input_loop(): while True: inp = input('Download path:\\n') if _test_write(inp): return inp #try: #", "e: print('write test failed: ',e) return finally: try: os.remove(test_file) except Exception as e:", "with open(test_file, 'wb') as f: f.write(bytes(0)) writable = True except Exception as e:", "+= 1024 if printprogess: self._print_progress(tmp, size) print('') print('Done') def input_loop(): while True: inp", ": print('invalid name, taking name from url') name = re.split(r'[?]',url_path.name)[0] # if '?'", "= f'\\r{bar} {current_bytes/1000000:0.2f} / {size/1000000:0.2f} MB' stdout.write(output) # stdout.flush() def _get_bar(self, progress): \"\"\"", "= float(r.headers['content-length']) contentlength = r.headers.get('content-length') if contentlength is not None: size = float(contentlength)", "stdout.write(output) # stdout.flush() def _get_bar(self, progress): \"\"\" progress must be between 0 and", "test_file = p / 'testfile.testfile' with open(test_file, 'wb') as f: f.write(bytes(0)) writable =", "class to manage downloading url links \"\"\" def __init__(self, *args, session=None): # creates", "download_path = self.src_path.parent download_path = self.cwd else: download_path = Path(d_path) # os.chdir(download_path) #", "d_path = input_loop() #let user decide where to download 
d_path = Path('/home/bruno/Desktop') name", "{save_file.name}') print(f'to {save_file.absolute()}') for chunk in r.iter_content(chunk_size=1024): if chunk: fd.write(chunk) tmp += 1024", "else: self.session = session def _print_progress(self, current_bytes, size): bar = self._get_bar(current_bytes / size)", "from url `d_path`: Default download path is current working directory. `name_out`: Default name", "with or without extension, takes extension from url if not specified. `printprogress`: Prints", "name_in != type(str): name_in = str(name_in) try: name_in[0] # if its empty it", "bar_percent = f' {progress*100:0.2f} % ' text = bar_start+bar_end+bar_percent return text def _make_name(self,", "parsing name, only alphanumeric, no whitespace clean_name[0] # empty testing except : print('invalid", "extension from url return name_path.name def download(self, url, d_path=None, name_out=None, printprogess=False): \"\"\" Downloads", "/ 'testfile.testfile' with open(test_file, 'wb') as f: f.write(bytes(0)) writable = True except Exception", "print('write test failed: ',e) return finally: try: os.remove(test_file) except Exception as e: #print('deleting", "e: #print('deleting test write failed: ',e) pass return writable if __name__ == \"__main__\":", "it raises exception # clean_name = re.search(r'\\w+',name_in).group() # parsing name, only alphanumeric, no", "# name without extension name_parts = name_in.split('.') # name without extension if len(name_parts)", "#try: # d_path = Path(inp) #except Exception as e: # print('invalid path, try", "# creates a session self.cwd = Path.cwd() self.src_path = Path(__file__) if not session:", "session def _print_progress(self, current_bytes, size): bar = self._get_bar(current_bytes / size) output = f'\\r{bar}", "' '.join(re.findall(r'\\w+.+',name_noext)) # parsing name, only alphanumeric, no whitespace clean_name[0] # empty testing", "= None if extension: name_path = Path(f'{clean_name}.{extension}') # custom extension specified and 
not", "Path('/home/bruno/Desktop') name = name_loop() # let user decide what name it will have", "print(f'Downloding: {save_file.name}') print(f'to {save_file.absolute()}') for chunk in r.iter_content(chunk_size=1024): if chunk: fd.write(chunk) tmp +=", "\".\" except: extension = None if extension: name_path = Path(f'{clean_name}.{extension}') # custom extension", "'testfile.testfile' with open(test_file, 'wb') as f: f.write(bytes(0)) writable = True except Exception as", "self.src_path.parent download_path = self.cwd else: download_path = Path(d_path) # os.chdir(download_path) # making file", "as a string \"\"\" FULL_BLOCKLENGTH = 32 fillblock = '█' if progress >", "# d_path = Path(inp) #except Exception as e: # print('invalid path, try again\\n')", "text def _make_name(self, url_path: Path, name_in: str): \"\"\" Parses the name and returns", "only alphanumeric, no whitespace # name = re.split(r'[.].+$',name_in)[0] # name without extension name_parts", "bar_start+bar_end+bar_percent return text def _make_name(self, url_path: Path, name_in: str): \"\"\" Parses the name", "''' writable = False try: p = Path(path) test_file = p / 'testfile.testfile'", "\"\"\" FULL_BLOCKLENGTH = 32 fillblock = '█' if progress > 1: progress =", "else: download_path = Path(d_path) # os.chdir(download_path) # making file path save_file = download_path", "after last \".\" name_path = Path(f'{clean_name}.{extension}') # extension from url return name_path.name def", "Path(d_path) # os.chdir(download_path) # making file path save_file = download_path / name_out #", "session: self.session = rq.Session() else: self.session = session def _print_progress(self, current_bytes, size): bar", "= name.split('.')[-1] # matching only extension after last \".\" except: extension = None", "Path(f'{clean_name}.{extension}') # extension from url return name_path.name def download(self, url, d_path=None, name_out=None, printprogess=False):", "not None if name_in and name_in != type(str): name_in = 
str(name_in) try: name_in[0]", "clean_name[0] # empty testing except : print('invalid name, taking name from url') name", "`printprogress`: Prints current download progress in terminal. \"\"\" url_path = Path(url) #download_path =", "download_path / name_out # checking if file already is there if save_file.exists(): print('skipping',", "bar = self._get_bar(current_bytes / size) output = f'\\r{bar} {current_bytes/1000000:0.2f} / {size/1000000:0.2f} MB' stdout.write(output)", "= self._get_bar(current_bytes / size) output = f'\\r{bar} {current_bytes/1000000:0.2f} / {size/1000000:0.2f} MB' stdout.write(output) #", "= str(name_in) try: name_in[0] # if its empty it raises exception # clean_name", "clean_name = ' '.join(re.findall(r'\\w+.+',name_noext)) # parsing name, only alphanumeric, no whitespace clean_name[0] #", "os class Downloader: \"\"\" class to manage downloading url links \"\"\" def __init__(self,", "decide where to download d_path = Path('/home/bruno/Desktop') name = name_loop() # let user", "Parses the name and returns a writebale name \"\"\" # in case its", "extension specified and not in the name else: name = re.split(r'[?]',url_path.name)[0] # if", "url_path: Path, name_in: str): \"\"\" Parses the name and returns a writebale name", "writable = True except Exception as e: print('write test failed: ',e) return finally:", "# parsing name, only alphanumeric, no whitespace # name = re.split(r'[.].+$',name_in)[0] # name", "1 blocks = int(progress / (1/FULL_BLOCKLENGTH)) bar_start = fillblock*blocks bar_end = (33 -", "where to download d_path = Path('/home/bruno/Desktop') name = name_loop() # let user decide", "name_in.split('.') # name without extension if len(name_parts) > 1: name_noext = '.'.join(name_parts[:-1]) #", "self.session = rq.Session() else: self.session = session def _print_progress(self, current_bytes, size): bar =", "\"\"\" progress must be between 0 and 1\\n Returns the bar with current", "__init__(self, *args, session=None): # creates a 
session self.cwd = Path.cwd() self.src_path = Path(__file__)", "taking name from url') name = re.split(r'[?]',url_path.name)[0] # if '?' in url, get", "download progress in terminal. \"\"\" url_path = Path(url) #download_path = self.cwd / url_path.name", "\"__main__\": # d_path = input_loop() #let user decide where to download d_path =", "name_noext = '.'.join(name_parts[:-1]) # joining together without extension else: name_noext = name_parts[0] clean_name", "p / 'testfile.testfile' with open(test_file, 'wb') as f: f.write(bytes(0)) writable = True except", "in case its a number and not None if name_in and name_in !=", "file to the path and returns True if it succeded ''' writable =", "name_path = Path(f'{clean_name}.{extension}') # custom extension specified and not in the name else:", "name_loop() # let user decide what name it will have d = Downloader()", "= Path(d_path) # os.chdir(download_path) # making file path save_file = download_path / name_out", "write failed: ',e) pass return writable if __name__ == \"__main__\": # d_path =", "bar_end = (33 - len(bar_start))*'_'+'|' bar_percent = f' {progress*100:0.2f} % ' text =", "self._make_name(url_path, name_out) if not d_path: # download_path = self.src_path.parent download_path = self.cwd else:", "testing except : print('invalid name, taking name from url') name = re.split(r'[?]',url_path.name)[0] #", "try again\\n') # continue #if d_path.exists(): return d_path def name_loop(): while True: inp", "/ size) output = f'\\r{bar} {current_bytes/1000000:0.2f} / {size/1000000:0.2f} MB' stdout.write(output) # stdout.flush() def", "# custom extension specified and not in the name else: name = re.split(r'[?]',url_path.name)[0]", "save_file = download_path / name_out # checking if file already is there if", "in a name with or without extension, takes extension from url if not", "case its a number and not None if name_in and name_in != type(str):", "in url, get rid of it extension = re.search(r'(?<=[.])\\w+$', name).group() 
# matching only", "name without extension name_parts = name_in.split('.') # name without extension if len(name_parts) >", "name = re.split(r'[.].+$',name_in)[0] # name without extension name_parts = name_in.split('.') # name without", "self.cwd else: download_path = Path(d_path) # os.chdir(download_path) # making file path save_file =", "r.headers.get('content-length') if contentlength is not None: size = float(contentlength) else: size = 1", "= int(progress / (1/FULL_BLOCKLENGTH)) bar_start = fillblock*blocks bar_end = (33 - len(bar_start))*'_'+'|' bar_percent", "\"\"\" class to manage downloading url links \"\"\" def __init__(self, *args, session=None): #", "must be between 0 and 1\\n Returns the bar with current progress as", "progress as a string \"\"\" FULL_BLOCKLENGTH = 32 fillblock = '█' if progress", "tmp += 1024 if printprogess: self._print_progress(tmp, size) print('') print('Done') def input_loop(): while True:", "# d_path = input_loop() #let user decide where to download d_path = Path('/home/bruno/Desktop')", "Exception as e: print('write test failed: ',e) return finally: try: os.remove(test_file) except Exception", "rid of it return name try: extension = re.search(r'(?<=[.])\\w+$', name_in).group() # matching only", "printprogess: self._print_progress(tmp, size) print('') print('Done') def input_loop(): while True: inp = input('Download path:\\n')", "let user decide what name it will have d = Downloader() test_image_url =", "= self.session.get(url) # size = float(r.headers['content-length']) contentlength = r.headers.get('content-length') if contentlength is not", "size = float(r.headers['content-length']) contentlength = r.headers.get('content-length') if contentlength is not None: size =", "inp = input('Download path:\\n') if _test_write(inp): return inp #try: # d_path = Path(inp)", "{current_bytes/1000000:0.2f} / {size/1000000:0.2f} MB' stdout.write(output) # stdout.flush() def _get_bar(self, progress): \"\"\" progress must", "returns True if it 
succeded ''' writable = False try: p = Path(path)", "= self.src_path.parent download_path = self.cwd else: download_path = Path(d_path) # os.chdir(download_path) # making", "matching only extension after last \".\" except: extension = None if extension: name_path", "name \"\"\" # in case its a number and not None if name_in", "extension after last \".\" except: extension = None if extension: name_path = Path(f'{clean_name}.{extension}')", "name).group() # matching only extension after last \".\" name_path = Path(f'{clean_name}.{extension}') # extension", "name with or without extension, takes extension from url if not specified. `printprogress`:", "not None: size = float(contentlength) else: size = 1 with open(save_file, 'wb') as", "def __init__(self, *args, session=None): # creates a session self.cwd = Path.cwd() self.src_path =", "= input_loop() #let user decide where to download d_path = Path('/home/bruno/Desktop') name =", "name else: name = re.split(r'[?]',url_path.name)[0] # if '?' in url, get rid of", "else: name = re.split(r'[?]',url_path.name)[0] # if '?' in url, get rid of it", "# matching only extension after last \".\" # extension = name.split('.')[-1] # matching", "re.split(r'[?]',url_path.name)[0] # if '?' 
in url, get rid of it return name try:", "save_file.name) return r = self.session.get(url) # size = float(r.headers['content-length']) contentlength = r.headers.get('content-length') if", "user decide what name it will have d = Downloader() test_image_url = 'https://images.pexels.com/photos/459793/pexels-photo-459793.jpeg?auto=compress&cs=tinysrgb&dpr=2&h=750&w=1260'", "alphanumeric, no whitespace clean_name[0] # empty testing except : print('invalid name, taking name", "get rid of it return name try: extension = re.search(r'(?<=[.])\\w+$', name_in).group() # matching", "True: inp = input('Download path:\\n') if _test_write(inp): return inp #try: # d_path =", "name is the tail of the url address, can take in a name", "while True: inp = input('Download path:\\n') if _test_write(inp): return inp #try: # d_path", "# checking if file already is there if save_file.exists(): print('skipping', save_file.name) return r", "d_path = Path(inp) #except Exception as e: # print('invalid path, try again\\n') #", "re.split(r'[?]',url_path.name)[0] # if '?' in url, get rid of it extension = re.search(r'(?<=[.])\\w+$',", "alphanumeric, no whitespace # name = re.split(r'[.].+$',name_in)[0] # name without extension name_parts =", "= Path(inp) #except Exception as e: # print('invalid path, try again\\n') # continue", "return d_path def name_loop(): while True: inp = input('Name:\\n') return inp def _test_write(path):", "print('skipping', save_file.name) return r = self.session.get(url) # size = float(r.headers['content-length']) contentlength = r.headers.get('content-length')", "without extension if len(name_parts) > 1: name_noext = '.'.join(name_parts[:-1]) # joining together without", "url return name_path.name def download(self, url, d_path=None, name_out=None, printprogess=False): \"\"\" Downloads from url", "only extension after last \".\" except: extension = None if extension: name_path =", "the name else: name = re.split(r'[?]',url_path.name)[0] # if '?' 
in url, get rid", "not d_path else Path(d_path) name_out = self._make_name(url_path, name_out) if not d_path: # download_path", "as rq from sys import stdout from pathlib import Path import re import", "float(contentlength) else: size = 1 with open(save_file, 'wb') as fd: tmp = 0", "the url address, can take in a name with or without extension, takes", "try: p = Path(path) test_file = p / 'testfile.testfile' with open(test_file, 'wb') as", "= 32 fillblock = '█' if progress > 1: progress = 1 blocks", "_print_progress(self, current_bytes, size): bar = self._get_bar(current_bytes / size) output = f'\\r{bar} {current_bytes/1000000:0.2f} /", "print('invalid path, try again\\n') # continue #if d_path.exists(): return d_path def name_loop(): while", "== \"__main__\": # d_path = input_loop() #let user decide where to download d_path", "download path is current working directory. `name_out`: Default name is the tail of", "Path(f'{clean_name}.{extension}') # custom extension specified and not in the name else: name =", "a number and not None if name_in and name_in != type(str): name_in =", "as fd: tmp = 0 print(f'Downloding: {save_file.name}') print(f'to {save_file.absolute()}') for chunk in r.iter_content(chunk_size=1024):", "= download_path / name_out # checking if file already is there if save_file.exists():", "its a number and not None if name_in and name_in != type(str): name_in", "the bar with current progress as a string \"\"\" FULL_BLOCKLENGTH = 32 fillblock", "name_out) if not d_path: # download_path = self.src_path.parent download_path = self.cwd else: download_path", "',e) return finally: try: os.remove(test_file) except Exception as e: #print('deleting test write failed:", "32 fillblock = '█' if progress > 1: progress = 1 blocks =", "# empty testing except : print('invalid name, taking name from url') name =", "not session: self.session = rq.Session() else: self.session = session def _print_progress(self, current_bytes, size):", "name_in).group() # matching only 
extension after last \".\" # extension = name.split('.')[-1] #", "_get_bar(self, progress): \"\"\" progress must be between 0 and 1\\n Returns the bar", "= Path('/home/bruno/Desktop') name = name_loop() # let user decide what name it will", "path save_file = download_path / name_out # checking if file already is there", "is not None: size = float(contentlength) else: size = 1 with open(save_file, 'wb')", "# size = float(r.headers['content-length']) contentlength = r.headers.get('content-length') if contentlength is not None: size", "def name_loop(): while True: inp = input('Name:\\n') return inp def _test_write(path): ''' writes", "return name_path.name def download(self, url, d_path=None, name_out=None, printprogess=False): \"\"\" Downloads from url `d_path`:", "1 with open(save_file, 'wb') as fd: tmp = 0 print(f'Downloding: {save_file.name}') print(f'to {save_file.absolute()}')", "and returns True if it succeded ''' writable = False try: p =", "# print('invalid path, try again\\n') # continue #if d_path.exists(): return d_path def name_loop():", "fd: tmp = 0 print(f'Downloding: {save_file.name}') print(f'to {save_file.absolute()}') for chunk in r.iter_content(chunk_size=1024): if", "self._get_bar(current_bytes / size) output = f'\\r{bar} {current_bytes/1000000:0.2f} / {size/1000000:0.2f} MB' stdout.write(output) # stdout.flush()", "url, get rid of it return name try: extension = re.search(r'(?<=[.])\\w+$', name_in).group() #", "only alphanumeric, no whitespace clean_name[0] # empty testing except : print('invalid name, taking", "= input('Name:\\n') return inp def _test_write(path): ''' writes a file to the path", "a name with or without extension, takes extension from url if not specified.", "FULL_BLOCKLENGTH = 32 fillblock = '█' if progress > 1: progress = 1", "url_path.name if not d_path else Path(d_path) name_out = self._make_name(url_path, name_out) if not d_path:", "get rid of it extension = re.search(r'(?<=[.])\\w+$', name).group() # matching only extension 
after", "downloading url links \"\"\" def __init__(self, *args, session=None): # creates a session self.cwd", "def _make_name(self, url_path: Path, name_in: str): \"\"\" Parses the name and returns a", "= Path(__file__) if not session: self.session = rq.Session() else: self.session = session def", "__name__ == \"__main__\": # d_path = input_loop() #let user decide where to download", "of it extension = re.search(r'(?<=[.])\\w+$', name).group() # matching only extension after last \".\"", "whitespace clean_name[0] # empty testing except : print('invalid name, taking name from url')", "f.write(bytes(0)) writable = True except Exception as e: print('write test failed: ',e) return", "download(self, url, d_path=None, name_out=None, printprogess=False): \"\"\" Downloads from url `d_path`: Default download path", "# matching only extension after last \".\" except: extension = None if extension:", "len(bar_start))*'_'+'|' bar_percent = f' {progress*100:0.2f} % ' text = bar_start+bar_end+bar_percent return text def", "Exception as e: # print('invalid path, try again\\n') # continue #if d_path.exists(): return", "\"\"\" def __init__(self, *args, session=None): # creates a session self.cwd = Path.cwd() self.src_path", "= Path.cwd() self.src_path = Path(__file__) if not session: self.session = rq.Session() else: self.session", "True except Exception as e: print('write test failed: ',e) return finally: try: os.remove(test_file)", "the name and returns a writebale name \"\"\" # in case its a", "and returns a writebale name \"\"\" # in case its a number and", "size = float(contentlength) else: size = 1 with open(save_file, 'wb') as fd: tmp", "= ' '.join(re.findall(r'\\w+.+',name_noext)) # parsing name, only alphanumeric, no whitespace clean_name[0] # empty", "input('Download path:\\n') if _test_write(inp): return inp #try: # d_path = Path(inp) #except Exception", "input('Name:\\n') return inp def _test_write(path): ''' writes a file to the path and", "int(progress / 
(1/FULL_BLOCKLENGTH)) bar_start = fillblock*blocks bar_end = (33 - len(bar_start))*'_'+'|' bar_percent =", "name_parts[0] clean_name = ' '.join(re.findall(r'\\w+.+',name_noext)) # parsing name, only alphanumeric, no whitespace clean_name[0]", "name_out=None, printprogess=False): \"\"\" Downloads from url `d_path`: Default download path is current working", "def _print_progress(self, current_bytes, size): bar = self._get_bar(current_bytes / size) output = f'\\r{bar} {current_bytes/1000000:0.2f}", "1024 if printprogess: self._print_progress(tmp, size) print('') print('Done') def input_loop(): while True: inp =", "Returns the bar with current progress as a string \"\"\" FULL_BLOCKLENGTH = 32", "'wb') as fd: tmp = 0 print(f'Downloding: {save_file.name}') print(f'to {save_file.absolute()}') for chunk in", "if it succeded ''' writable = False try: p = Path(path) test_file =", "name_path = Path(f'{clean_name}.{extension}') # extension from url return name_path.name def download(self, url, d_path=None,", "if _test_write(inp): return inp #try: # d_path = Path(inp) #except Exception as e:", "inp def _test_write(path): ''' writes a file to the path and returns True", "return finally: try: os.remove(test_file) except Exception as e: #print('deleting test write failed: ',e)", "try: extension = re.search(r'(?<=[.])\\w+$', name_in).group() # matching only extension after last \".\" #", "url') name = re.split(r'[?]',url_path.name)[0] # if '?' in url, get rid of it", "tmp = 0 print(f'Downloding: {save_file.name}') print(f'to {save_file.absolute()}') for chunk in r.iter_content(chunk_size=1024): if chunk:", "# if '?' 
in url, get rid of it return name try: extension", "name_in and name_in != type(str): name_in = str(name_in) try: name_in[0] # if its", "1: name_noext = '.'.join(name_parts[:-1]) # joining together without extension else: name_noext = name_parts[0]", "d_path else Path(d_path) name_out = self._make_name(url_path, name_out) if not d_path: # download_path =", "extension = re.search(r'(?<=[.])\\w+$', name).group() # matching only extension after last \".\" name_path =", "Exception as e: #print('deleting test write failed: ',e) pass return writable if __name__", "printprogess=False): \"\"\" Downloads from url `d_path`: Default download path is current working directory.", "`d_path`: Default download path is current working directory. `name_out`: Default name is the", "blocks = int(progress / (1/FULL_BLOCKLENGTH)) bar_start = fillblock*blocks bar_end = (33 - len(bar_start))*'_'+'|'", "only extension after last \".\" # extension = name.split('.')[-1] # matching only extension", "open(save_file, 'wb') as fd: tmp = 0 print(f'Downloding: {save_file.name}') print(f'to {save_file.absolute()}') for chunk", "custom extension specified and not in the name else: name = re.split(r'[?]',url_path.name)[0] #", "# stdout.flush() def _get_bar(self, progress): \"\"\" progress must be between 0 and 1\\n", "extension: name_path = Path(f'{clean_name}.{extension}') # custom extension specified and not in the name", "working directory. 
`name_out`: Default name is the tail of the url address, can", "writebale name \"\"\" # in case its a number and not None if", "extension = re.search(r'(?<=[.])\\w+$', name_in).group() # matching only extension after last \".\" # extension", "if not d_path else Path(d_path) name_out = self._make_name(url_path, name_out) if not d_path: #", "# name = re.split(r'[.].+$',name_in)[0] # name without extension name_parts = name_in.split('.') # name", "if its empty it raises exception # clean_name = re.search(r'\\w+',name_in).group() # parsing name,", "name_path.name def download(self, url, d_path=None, name_out=None, printprogess=False): \"\"\" Downloads from url `d_path`: Default", "# joining together without extension else: name_noext = name_parts[0] clean_name = ' '.join(re.findall(r'\\w+.+',name_noext))", "= Path(f'{clean_name}.{extension}') # custom extension specified and not in the name else: name", "parsing name, only alphanumeric, no whitespace # name = re.split(r'[.].+$',name_in)[0] # name without", "import os class Downloader: \"\"\" class to manage downloading url links \"\"\" def", "not in the name else: name = re.split(r'[?]',url_path.name)[0] # if '?' 
in url,", "path and returns True if it succeded ''' writable = False try: p", "import stdout from pathlib import Path import re import os class Downloader: \"\"\"", "d_path = Path('/home/bruno/Desktop') name = name_loop() # let user decide what name it", "empty it raises exception # clean_name = re.search(r'\\w+',name_in).group() # parsing name, only alphanumeric,", "clean_name = re.search(r'\\w+',name_in).group() # parsing name, only alphanumeric, no whitespace # name =", "name it will have d = Downloader() test_image_url = 'https://images.pexels.com/photos/459793/pexels-photo-459793.jpeg?auto=compress&cs=tinysrgb&dpr=2&h=750&w=1260' d.download(test_image_url, d_path, name,", "= name_in.split('.') # name without extension if len(name_parts) > 1: name_noext = '.'.join(name_parts[:-1])", "chunk in r.iter_content(chunk_size=1024): if chunk: fd.write(chunk) tmp += 1024 if printprogess: self._print_progress(tmp, size)", "None: size = float(contentlength) else: size = 1 with open(save_file, 'wb') as fd:", "# if '?' in url, get rid of it extension = re.search(r'(?<=[.])\\w+$', name).group()", "current download progress in terminal. \"\"\" url_path = Path(url) #download_path = self.cwd /", "input_loop() #let user decide where to download d_path = Path('/home/bruno/Desktop') name = name_loop()", "progress > 1: progress = 1 blocks = int(progress / (1/FULL_BLOCKLENGTH)) bar_start =", "\"\"\" url_path = Path(url) #download_path = self.cwd / url_path.name if not d_path else", "checking if file already is there if save_file.exists(): print('skipping', save_file.name) return r =", "r.iter_content(chunk_size=1024): if chunk: fd.write(chunk) tmp += 1024 if printprogess: self._print_progress(tmp, size) print('') print('Done')", "Downloads from url `d_path`: Default download path is current working directory. 
`name_out`: Default", "if len(name_parts) > 1: name_noext = '.'.join(name_parts[:-1]) # joining together without extension else:", "= f' {progress*100:0.2f} % ' text = bar_start+bar_end+bar_percent return text def _make_name(self, url_path:", "size) output = f'\\r{bar} {current_bytes/1000000:0.2f} / {size/1000000:0.2f} MB' stdout.write(output) # stdout.flush() def _get_bar(self,", "'.'.join(name_parts[:-1]) # joining together without extension else: name_noext = name_parts[0] clean_name = '", "= '█' if progress > 1: progress = 1 blocks = int(progress /", "self.cwd / url_path.name if not d_path else Path(d_path) name_out = self._make_name(url_path, name_out) if", "# download_path = self.src_path.parent download_path = self.cwd else: download_path = Path(d_path) # os.chdir(download_path)", "url links \"\"\" def __init__(self, *args, session=None): # creates a session self.cwd =", "inp = input('Name:\\n') return inp def _test_write(path): ''' writes a file to the" ]
[ "Stuff with Python/02.00 chuva.py # Script da Chuva print('Está chovendo?') verificaChuva = input()", "checkGuardaChuva.capitalize() == 'Não': while verificaChuva.capitalize() == 'Sim': print('Espere um pouco') print('Está chovendo?') verificaChuva", "= input() if verificaChuva.capitalize() == 'Sim': print('Você tem Guarda-chuva?') checkGuardaChuva = input() if", "Chuva print('Está chovendo?') verificaChuva = input() if verificaChuva.capitalize() == 'Sim': print('Você tem Guarda-chuva?')", "input() if checkGuardaChuva.capitalize() == 'Não': while verificaChuva.capitalize() == 'Sim': print('Espere um pouco') print('Está", "while verificaChuva.capitalize() == 'Sim': print('Espere um pouco') print('Está chovendo?') verificaChuva = input() print('Saia')", "input() if verificaChuva.capitalize() == 'Sim': print('Você tem Guarda-chuva?') checkGuardaChuva = input() if checkGuardaChuva.capitalize()", "# Script da Chuva print('Está chovendo?') verificaChuva = input() if verificaChuva.capitalize() == 'Sim':", "Guarda-chuva?') checkGuardaChuva = input() if checkGuardaChuva.capitalize() == 'Não': while verificaChuva.capitalize() == 'Sim': print('Espere", "tem Guarda-chuva?') checkGuardaChuva = input() if checkGuardaChuva.capitalize() == 'Não': while verificaChuva.capitalize() == 'Sim':", "== 'Sim': print('Você tem Guarda-chuva?') checkGuardaChuva = input() if checkGuardaChuva.capitalize() == 'Não': while", "Python/02.00 chuva.py # Script da Chuva print('Está chovendo?') verificaChuva = input() if verificaChuva.capitalize()", "verificaChuva = input() if verificaChuva.capitalize() == 'Sim': print('Você tem Guarda-chuva?') checkGuardaChuva = input()", "print('Está chovendo?') verificaChuva = input() if verificaChuva.capitalize() == 'Sim': print('Você tem Guarda-chuva?') checkGuardaChuva", "if verificaChuva.capitalize() == 'Sim': print('Você tem Guarda-chuva?') checkGuardaChuva = input() if checkGuardaChuva.capitalize() ==", "'Não': while verificaChuva.capitalize() == 'Sim': 
print('Espere um pouco') print('Está chovendo?') verificaChuva = input()", "if checkGuardaChuva.capitalize() == 'Não': while verificaChuva.capitalize() == 'Sim': print('Espere um pouco') print('Está chovendo?')", "da Chuva print('Está chovendo?') verificaChuva = input() if verificaChuva.capitalize() == 'Sim': print('Você tem", "print('Você tem Guarda-chuva?') checkGuardaChuva = input() if checkGuardaChuva.capitalize() == 'Não': while verificaChuva.capitalize() ==", "verificaChuva.capitalize() == 'Sim': print('Você tem Guarda-chuva?') checkGuardaChuva = input() if checkGuardaChuva.capitalize() == 'Não':", "Script da Chuva print('Está chovendo?') verificaChuva = input() if verificaChuva.capitalize() == 'Sim': print('Você", "chovendo?') verificaChuva = input() if verificaChuva.capitalize() == 'Sim': print('Você tem Guarda-chuva?') checkGuardaChuva =", "with Python/02.00 chuva.py # Script da Chuva print('Está chovendo?') verificaChuva = input() if", "chuva.py # Script da Chuva print('Está chovendo?') verificaChuva = input() if verificaChuva.capitalize() ==", "== 'Não': while verificaChuva.capitalize() == 'Sim': print('Espere um pouco') print('Está chovendo?') verificaChuva =", "'Sim': print('Você tem Guarda-chuva?') checkGuardaChuva = input() if checkGuardaChuva.capitalize() == 'Não': while verificaChuva.capitalize()", "checkGuardaChuva = input() if checkGuardaChuva.capitalize() == 'Não': while verificaChuva.capitalize() == 'Sim': print('Espere um", "the Boring Stuff with Python/02.00 chuva.py # Script da Chuva print('Está chovendo?') verificaChuva", "= input() if checkGuardaChuva.capitalize() == 'Não': while verificaChuva.capitalize() == 'Sim': print('Espere um pouco')", "<filename>Automate the Boring Stuff with Python/02.00 chuva.py # Script da Chuva print('Está chovendo?')", "Boring Stuff with Python/02.00 chuva.py # Script da Chuva print('Está chovendo?') verificaChuva =" ]
[ "limit=None) -> chess.engine.PlayResult: legal_moves = list(board.legal_moves) move = random.choice(legal_moves) return chess.engine.PlayResult(move=move, ponder=None) def", "RandomPlayer: def __init__(self): self.id = { 'name': 'RandomPlayer' } def play(self, board: chess.Board,", "random import chess import chess.engine class RandomPlayer: def __init__(self): self.id = { 'name':", "-> chess.engine.PlayResult: legal_moves = list(board.legal_moves) move = random.choice(legal_moves) return chess.engine.PlayResult(move=move, ponder=None) def quit(self):", "'name': 'RandomPlayer' } def play(self, board: chess.Board, limit=None) -> chess.engine.PlayResult: legal_moves = list(board.legal_moves)", "= { 'name': 'RandomPlayer' } def play(self, board: chess.Board, limit=None) -> chess.engine.PlayResult: legal_moves", "import chess import chess.engine class RandomPlayer: def __init__(self): self.id = { 'name': 'RandomPlayer'", "self.id = { 'name': 'RandomPlayer' } def play(self, board: chess.Board, limit=None) -> chess.engine.PlayResult:", "play(self, board: chess.Board, limit=None) -> chess.engine.PlayResult: legal_moves = list(board.legal_moves) move = random.choice(legal_moves) return", "chess.Board, limit=None) -> chess.engine.PlayResult: legal_moves = list(board.legal_moves) move = random.choice(legal_moves) return chess.engine.PlayResult(move=move, ponder=None)", "def play(self, board: chess.Board, limit=None) -> chess.engine.PlayResult: legal_moves = list(board.legal_moves) move = random.choice(legal_moves)", "'RandomPlayer' } def play(self, board: chess.Board, limit=None) -> chess.engine.PlayResult: legal_moves = list(board.legal_moves) move", "__init__(self): self.id = { 'name': 'RandomPlayer' } def play(self, board: chess.Board, limit=None) ->", "import random import chess import chess.engine class RandomPlayer: def __init__(self): self.id = {", "chess.engine.PlayResult: legal_moves = list(board.legal_moves) move = random.choice(legal_moves) return 
chess.engine.PlayResult(move=move, ponder=None) def quit(self): pass", "{ 'name': 'RandomPlayer' } def play(self, board: chess.Board, limit=None) -> chess.engine.PlayResult: legal_moves =", "import chess.engine class RandomPlayer: def __init__(self): self.id = { 'name': 'RandomPlayer' } def", "} def play(self, board: chess.Board, limit=None) -> chess.engine.PlayResult: legal_moves = list(board.legal_moves) move =", "def __init__(self): self.id = { 'name': 'RandomPlayer' } def play(self, board: chess.Board, limit=None)", "chess.engine class RandomPlayer: def __init__(self): self.id = { 'name': 'RandomPlayer' } def play(self,", "class RandomPlayer: def __init__(self): self.id = { 'name': 'RandomPlayer' } def play(self, board:", "chess import chess.engine class RandomPlayer: def __init__(self): self.id = { 'name': 'RandomPlayer' }", "board: chess.Board, limit=None) -> chess.engine.PlayResult: legal_moves = list(board.legal_moves) move = random.choice(legal_moves) return chess.engine.PlayResult(move=move," ]
[ "**keywords): \"\"\" Simply sums up all the passed numbers. \"\"\" count = initial", "def total (initial, *positionals, **keywords): \"\"\" Simply sums up all the passed numbers.", "sums up all the passed numbers. \"\"\" count = initial for n in", "positionals: count += n for n in keywords: count += keywords[n] return count", "the passed numbers. \"\"\" count = initial for n in positionals: count +=", "all the passed numbers. \"\"\" count = initial for n in positionals: count", "Simply sums up all the passed numbers. \"\"\" count = initial for n", "for n in positionals: count += n for n in keywords: count +=", "count = initial for n in positionals: count += n for n in", "up all the passed numbers. \"\"\" count = initial for n in positionals:", "passed numbers. \"\"\" count = initial for n in positionals: count += n", "count += n for n in keywords: count += keywords[n] return count print(__name__)", "\"\"\" Simply sums up all the passed numbers. \"\"\" count = initial for", "= initial for n in positionals: count += n for n in keywords:", "\"\"\" count = initial for n in positionals: count += n for n", "(initial, *positionals, **keywords): \"\"\" Simply sums up all the passed numbers. \"\"\" count", "n in positionals: count += n for n in keywords: count += keywords[n]", "initial for n in positionals: count += n for n in keywords: count", "numbers. \"\"\" count = initial for n in positionals: count += n for", "*positionals, **keywords): \"\"\" Simply sums up all the passed numbers. \"\"\" count =", "total (initial, *positionals, **keywords): \"\"\" Simply sums up all the passed numbers. \"\"\"", "in positionals: count += n for n in keywords: count += keywords[n] return" ]
[ "value] = receiveTLV() # Expect Position if length < 13: print(\"No position received.", "script. ser.write(txBuffer) except: print(f\"Error during transmission of request {txBuffer.hex()}\") stopLoop = True return", "= parseTLV(typeTLV, length, value) for i in range(num_distances): [addr, d, dq] = distances[i]", "descript. parser.add_argument( '--port', default=defaultPortName, help='specify the name of the port to use (default:", "byteorder='little') return [x, y, z, q] def parseTLV(typeTLV, length, value): # TLV_TYPE_DUMMY =", "buffer.\") print(value) ser.reset_input_buffer() return EXIT_FAILURE # ---------Now, I read until I get the", "for SPI dummy byte # TLV_TYPE_POS_XYZ = bytes.fromhex(\"41\") # Response position coordinates x,y,z", "already done so import time import sys import argparse defaultPortName = '/dev/ttyACM0' #", "# Response: Ranging anchor distances # TLV_TYPE_RNG_AN_POS_DIST = bytes.fromhex(\"49\") # Response: Ranging anchor", "length < 13: print(\"No position received. Flushing buffer.\") ser.reset_input_buffer() return EXIT_FAILURE else: printTLV(typeTLV,", "<NAME> # Intended for use with DWM 1001 module through UART TLV interface", "------------- # 2. Receive response. May get dummy bytes before real response. [typeTLV,", "z, q] def parseTLV(typeTLV, length, value): # TLV_TYPE_DUMMY = bytes.fromhex(\"00\") # Reserved for", "[] for i in range(num_distances): offset = i*13+1 addr = value[offset:offset+2].hex() # UWB", "(See myParser() function) import serial # use \"pip install pyserial\" if you have", "API Guide 5.3.10. # It parses the information received to send over #", "[addr, d, dq] = distances[i] print(\"{:<5} {:<15} {:<5}\".format(addr, d, dq)) def getLocations(): #", "= serial.Serial(myPort, baudrate=115200, timeout=None) print(ser) print(\"Connection established.\") except: print(\"Error in trying to connect", "be written to anchor nodes # Currently limited to Python 3.6+. 
Use command", "else: printTLV(typeTLV, length, value) [typeTLV, length, value] = receiveTLV() # Expect Distances if", "port connection typeTLV = TLV_TYPE_DUMMY while (typeTLV == TLV_TYPE_DUMMY): typeTLV = ser.read(1) #", "# Response position coordinates x,y,z with q TLV_TYPE_RNG_AN_DIST = bytes.fromhex(\"48\") # Response: Ranging", "to set position. Follow with position as 13 bytes DWM_POS_GET = bytes.fromhex(\"02 00\")", "for i in range(num_distances): [addr, d, dq] = distances[i] print(\"{:<5} {:<15} {:<5}\".format(addr, d,", "---------Now, I read until I get the position [typeTLV, length, value] = receiveTLV()", "in trying to connect to serial port {}\".format(myPort)) stopLoop = False # Loop", "network. # In the future, this script will be expanded to allow #", "byteorder = 'little') dq = int.from_bytes(value[offset+12:offset+13], byteorder = 'little') distances.append([addr, d, dq]) return", "the name of the port (See myParser() function) import serial # use \"pip", "= 'get position info') # Script descript. parser.add_argument( '--port', default=defaultPortName, help='specify the name", "is 8 bytes here, not 2 bytes d = int.from_bytes(value[offset+8:offset+12], byteorder = 'little')", "trying to connect to serial port {}\".format(myPort)) stopLoop = False # Loop plan:", "code]. return [typeTLV, lengthTLV, valueTLV] def parsePOSvalue(value): # This helper function takes a", "{:=<5}\".format('addr', 'd', 'dq', 'x', 'y', 'z', 'q')) [num_distances, distances] = parseTLV(typeTLV, length, value)", "= bytes.fromhex(\"02 00\") # Used to ask for position. 
DWM_LOC_GET = bytes.fromhex(\"0c 00\")", "distances.append([addr, d, dq, x, y, z, q]) return [num_distances, distances] # Default case:", "buffer.\") ser.reset_input_buffer() return EXIT_FAILURE else: printTLV(typeTLV, length, value) [typeTLV, length, value] = receiveTLV()", "September 2019 by <NAME> # Intended for use with DWM 1001 module through", "the \"type\" byte of the response lengthTLV = ser.read(1) # Read the \"length\"", "printTLV(typeTLV, length, value) [typeTLV, length, value] = receiveTLV() # Expect Distances if length", "< 13: print(\"No position received. Flushing buffer.\") ser.reset_input_buffer() return EXIT_FAILURE else: printTLV(typeTLV, length,", "the dwm_loc_get API call as specified in the # DWM1001 Firmware API Guide", "num_distances = int.from_bytes(value[0:1]) distances = [] for i in range (num_distances): offset =", "code may be received from an anchor node num_distances = int.from_bytes(value[0:1]) distances =", "interface # This script calls the dwm_loc_get API call as specified in the", "# Read the \"type\" byte of the response lengthTLV = ser.read(1) # Read", "the value [error code]. return [typeTLV, lengthTLV, valueTLV] def parsePOSvalue(value): # This helper", "you should use /dev/ttyACM0 # ON Windows, the port name may be 'COM9'", "# This will be the name of the handle to the serial port", "the position [typeTLV, length, value] = receiveTLV() # Expect Position if length <", "info') # Script descript. parser.add_argument( '--port', default=defaultPortName, help='specify the name of the port", "while (typeTLV == TLV_TYPE_DUMMY): typeTLV = ser.read(1) # Read the \"type\" byte of", "Guide 5.3.10. # It parses the information received to send over # the", "def receiveTLV(): # Listen for TLV response from Decawave DWM1001 module # Returns", "2: internal error # 3: invalid parameter # 4: busy # 5: operation", "response lengthTLV = ser.read(1) # Read the \"length\" byte of the response lengthTLV", "# 3. 
Output message # ---------- while stopLoop is False: getLocations() def sendTLV(request):", "confirmation / error code # --Second response is position # 2.5 Error handling", "print(\"{:<15} {:<15} {:<15} {:<5}\".format(x,y,z,q)) if typeTLV == TLV_TYPE_RNG_AN_POS_DIST: print(\"{:=<5} {:=<15} {:=<5} {:=<15} {:=<15}", "= int.from_bytes(lengthTLV, byteorder='little') valueTLV = ser.read(lengthTLV) # Read the value [error code]. return", "def main(): global ser print(\"dwmPosGet started.\") myPort = myParser() # Establish serial port", "Default case: print(\"Error: attempted to parse TLV of type not yet supported.\") return", "position. DWM_LOC_GET = bytes.fromhex(\"0c 00\") # Request for position + distances to anchors/tags", "for TLV response from Decawave DWM1001 module # Returns a list of [Type,", "5: operation not permitted # API Commands [Type, length] DWM_POS_SET = bytes.fromhex(\"01 0d\")", "[addr, d, dq, x, y, z, q] = distances[i] print(\"{:<5} {:<15} {:<5} {:<15}", "values x = int.from_bytes(value[0:4], byteorder='little') y = int.from_bytes(value[4:8], byteorder='little') z = int.from_bytes(value[8:12], byteorder='little')", "z, q] = parsePOSvalue(value) return [x, y, z, q] if typeTLV == TLV_TYPE_RNG_AN_DIST:", "the port to use (default: ' + defaultPortName + ' )' ) args", "byteorder='little') q = int.from_bytes(value[12:13], byteorder='little') return [x, y, z, q] def parseTLV(typeTLV, length,", "limited to Python 3.6+. 
Use command line arguments # to specify the name", "= 'little') distances.append([addr, d, dq]) return [num_distances, distances] if typeTLV == TLV_TYPE_RNG_AN_POS_DIST: num_distances", "q] = parsePOSvalue(value) return [x, y, z, q] if typeTLV == TLV_TYPE_RNG_AN_DIST: #", "quality [x,y,z,q] = parsePOSvalue(value[offset+7:offset+20]) distances.append([addr, d, dq, x, y, z, q]) return [num_distances,", "Being written September 2019 by <NAME> # Intended for use with DWM 1001", "function handles command lets the user specify the # name of the port", "d, dq)) def getLocations(): # 1. Ask Decawave for Position and distances temp", "# use \"pip install pyserial\" if you have not already done so import", "It parses the information received to send over # the ROS network. #", "value) [typeTLV, length, value] = receiveTLV() # Expect Distances if length < 13:", "# Read the value [error code]. return [typeTLV, lengthTLV, valueTLV] def parsePOSvalue(value): #", "Receive response, parsing as I go # --First response is confirmation / error", ") args = parser.parse_args() print(\"Using port:\", args.port) return args.port ser = None #", "receiveTLV(): # Listen for TLV response from Decawave DWM1001 module # Returns a", "byteorder = 'little') distances.append([addr, d, dq]) return [num_distances, distances] if typeTLV == TLV_TYPE_RNG_AN_POS_DIST:", "= 'little') # distance dq = int.from_bytes(value[offset+6:offset+7], byteorder = 'little') # distance quality", "response. May get dummy bytes before real response. [typeTLV, length, value]= receiveTLV() if", "Response: Ranging anchor distances and positions if typeTLV == TLV_TYPE_POS_XYZ: [x, y, z,", "the ROS network. # In the future, this script will be expanded to", "written to anchor nodes # Currently limited to Python 3.6+. Use command line", "if you have not already done so import time import sys import argparse", "nodes # Currently limited to Python 3.6+. 
Use command line arguments # to", "time import sys import argparse defaultPortName = '/dev/ttyACM0' # On linux, you should", "the response lengthTLV = int.from_bytes(lengthTLV, byteorder='little') valueTLV = ser.read(lengthTLV) # Read the value", "= False # Loop plan: # 1. Ask Decawave for position # 2.", "global ser # The handle for the serial port connection typeTLV = TLV_TYPE_DUMMY", "return [x, y, z, q] if typeTLV == TLV_TYPE_RNG_AN_DIST: # This code may", "'y', 'z', 'q')) [num_distances, distances] = parseTLV(typeTLV, length, value) for i in range(num_distances):", "will be the name of the handle to the serial port EXIT_SUCCESS =", "# dwmDistances # Being written September 2019 by <NAME> # Intended for use", "bytes DWM_POS_GET = bytes.fromhex(\"02 00\") # Used to ask for position. DWM_LOC_GET =", "in range(num_distances): offset = i*13+1 addr = value[offset:offset+2].hex() # UWB address d =", "calls the dwm_loc_get API call as specified in the # DWM1001 Firmware API", "the port (See myParser() function) import serial # use \"pip install pyserial\" if", "permitted # API Commands [Type, length] DWM_POS_SET = bytes.fromhex(\"01 0d\") # Used to", "TLV_TYPE_RNG_AN_POS_DIST: print(\"{:=<5} {:=<15} {:=<5} {:=<15} {:=<15} {:=<15} {:=<5}\".format('addr', 'd', 'dq', 'x', 'y', 'z',", "Note: Address size is 8 bytes here, not 2 bytes d = int.from_bytes(value[offset+8:offset+12],", "to be written to anchor nodes # Currently limited to Python 3.6+. 
Use", "TLV_TYPE_RNG_AN_POS_DIST = bytes.fromhex(\"49\") # Response: Ranging anchor distances and positions def main(): global", "EXIT_FAILURE else: printTLV(typeTLV, length, value) [typeTLV, length, value] = receiveTLV() # Expect Distances", "is confirmation / error code # --Second response is position # 2.5 Error", "linux, you should use /dev/ttyACM0 # ON Windows, the port name may be", "TLV_TYPE_RNG_AN_DIST: # This code may be received from an anchor node num_distances =", "\"pip install pyserial\" if you have not already done so import time import", "stopLoop = True return EXIT_FAILURE return EXIT_SUCCESS def receiveTLV(): # Listen for TLV", "[x, y, z, q] = parsePOSvalue(value) return [x, y, z, q] if typeTLV", "Distances if length < 13: print(\"No distances received\") else: printTLV(typeTLV, length, value) #", "invalid parameter # 4: busy # 5: operation not permitted # API Commands", "in the buffer that could confuse this script. ser.write(txBuffer) except: print(f\"Error during transmission", "ser.read(1) # Read the \"type\" byte of the response lengthTLV = ser.read(1) #", "== TLV_TYPE_RNG_AN_DIST: print(\"{:=<5} {:=<15} {:=<5}\".format('addr','d','dq')) [num_distances, distances] = parseTLV(typeTLV, length, value) for i", "q # TLV_TYPE_RNG_AN_DIST = bytes.fromhex(\"48\") # Response: Ranging anchor distances # TLV_TYPE_RNG_AN_POS_DIST =", "of type not yet supported.\") return EXIT_FAILURE def printTLV(typeTLV, length, value): if typeTLV", "TLV_TYPE_POS_XYZ: [x, y, z, q] = parsePOSvalue(value) return [x, y, z, q] if", "[x,y,z,q] = parseTLV(typeTLV, length, value) print(\"{:<15} {:<15} {:<15} {:<5}\".format(x,y,z,q)) if typeTLV == TLV_TYPE_RNG_AN_POS_DIST:", "of the handle to the serial port EXIT_SUCCESS = 0 EXIT_FAILURE = 1", "valueTLV] def parsePOSvalue(value): # This helper function takes a 13-byte position code and", "[] for i in range (num_distances): offset = i*13+1 addr = value[offset:offset+8].hex() #", "serial port EXIT_SUCCESS = 0 EXIT_FAILURE = 1 # 
API Error codes ERR_CODE_OK", "serial port {}\".format(myPort)) stopLoop = False # Loop plan: # 1. Ask Decawave", "return [typeTLV, lengthTLV, valueTLV] def parsePOSvalue(value): # This helper function takes a 13-byte", "# distance quality [x,y,z,q] = parsePOSvalue(value[offset+7:offset+20]) distances.append([addr, d, dq, x, y, z, q])", "from Decawave DWM1001 module # Returns a list of [Type, Length, Value] #", "'get position info') # Script descript. parser.add_argument( '--port', default=defaultPortName, help='specify the name of", "API Commands [Type, length] DWM_POS_SET = bytes.fromhex(\"01 0d\") # Used to set position.", "distances] = parseTLV(typeTLV, length, value) for i in range(num_distances): [addr, d, dq, x,", "3.6+. Use command line arguments # to specify the name of the port", "yet supported.\") return EXIT_FAILURE def printTLV(typeTLV, length, value): if typeTLV == TLV_TYPE_POS_XYZ: print(", "myPort = myParser() # Establish serial port connection try: ser = serial.Serial(myPort, baudrate=115200,", "SPI dummy byte TLV_TYPE_POS_XYZ = bytes.fromhex(\"41\") # Response position coordinates x,y,z with q", "int.from_bytes(value[0:1], byteorder = 'little') distances = [] for i in range(num_distances): offset =", "= 'little') distances = [] for i in range(num_distances): offset = i*13+1 addr", "to serial port {}\".format(myPort)) stopLoop = False # Loop plan: # 1. Ask", "# position updates to be written to anchor nodes # Currently limited to", "sendTLV(request): global ser txBuffer = request try: ser.reset_input_buffer() # Get rid of anything", "def printTLV(typeTLV, length, value): if typeTLV == TLV_TYPE_POS_XYZ: print( \"{:_<15} {:_<15} {:_<15} {:_<5}\".format('x','y','z','q'))", "= True return EXIT_FAILURE return EXIT_SUCCESS def receiveTLV(): # Listen for TLV response", "position. Follow with position as 13 bytes DWM_POS_GET = bytes.fromhex(\"02 00\") # Used", "3. 
Output message # ---------- while stopLoop is False: getLocations() def sendTLV(request): global", "dq = int.from_bytes(value[offset+6:offset+7], byteorder = 'little') # distance quality [x,y,z,q] = parsePOSvalue(value[offset+7:offset+20]) distances.append([addr,", "if typeTLV == TLV_TYPE_RNG_AN_POS_DIST: print(\"{:=<5} {:=<15} {:=<5} {:=<15} {:=<15} {:=<15} {:=<5}\".format('addr', 'd', 'dq',", "addr = value[offset:offset+8].hex() # Note: Address size is 8 bytes here, not 2", "int.from_bytes(value[offset+2:offset+6], byteorder = 'little') # distance dq = int.from_bytes(value[offset+6:offset+7], byteorder = 'little') #", "typeTLV == TLV_TYPE_RNG_AN_DIST: # This code may be received from an anchor node", "# Expect Position if length < 13: print(\"No position received. Flushing buffer.\") ser.reset_input_buffer()", "= receiveTLV() # Expect Distances if length < 13: print(\"No distances received\") else:", "2. Receive response, parsing as I go # --First response is confirmation /", "and positions def main(): global ser print(\"dwmPosGet started.\") myPort = myParser() # Establish", "codes TLV_TYPE_DUMMY = bytes.fromhex(\"00\") # Reserved for SPI dummy byte TLV_TYPE_POS_XYZ = bytes.fromhex(\"41\")", "byteorder='little') valueTLV = ser.read(lengthTLV) # Read the value [error code]. return [typeTLV, lengthTLV,", "for i in range(num_distances): [addr, d, dq, x, y, z, q] = distances[i]", "expanded to allow # position updates to be written to anchor nodes #", "= int.from_bytes(value[8:12], byteorder='little') q = int.from_bytes(value[12:13], byteorder='little') return [x, y, z, q] def", "distances TLV_TYPE_RNG_AN_POS_DIST = bytes.fromhex(\"49\") # Response: Ranging anchor distances and positions def main():", "= distances[i] print(\"{:<5} {:<15} {:<5} {:<15} {:<15} {:<15} {:<5}\".format(addr, d, dq, x, y,", "{:<15} {:<5}\".format(addr, d, dq)) def getLocations(): # 1. 
Ask Decawave for Position and", "EXIT_SUCCESS def receiveTLV(): # Listen for TLV response from Decawave DWM1001 module #", "= bytes.fromhex(\"48\") # Response: Ranging anchor distances TLV_TYPE_RNG_AN_POS_DIST = bytes.fromhex(\"49\") # Response: Ranging", "name of the port (See myParser() function) import serial # use \"pip install", "(default: ' + defaultPortName + ' )' ) args = parser.parse_args() print(\"Using port:\",", "[num_distances, distances] = parseTLV(typeTLV, length, value) for i in range(num_distances): [addr, d, dq]", "= bytes.fromhex(\"00\") # Reserved for SPI dummy byte TLV_TYPE_POS_XYZ = bytes.fromhex(\"41\") # Response", "valueTLV = ser.read(lengthTLV) # Read the value [error code]. return [typeTLV, lengthTLV, valueTLV]", "response from Decawave DWM1001 module # Returns a list of [Type, Length, Value]", "distances and positions if typeTLV == TLV_TYPE_POS_XYZ: [x, y, z, q] = parsePOSvalue(value)", "print(\"dwmPosGet started.\") myPort = myParser() # Establish serial port connection try: ser =", "d = int.from_bytes(value[offset+2:offset+6], byteorder = 'little') # distance dq = int.from_bytes(value[offset+6:offset+7], byteorder =", "import serial # use \"pip install pyserial\" if you have not already done", "lets the user specify the # name of the port to use with", "be the name of the handle to the serial port EXIT_SUCCESS = 0", "the # x, y, z, and q values x = int.from_bytes(value[0:4], byteorder='little') y", "until I get the position [typeTLV, length, value] = receiveTLV() # Expect Position", "connection try: ser = serial.Serial(myPort, baudrate=115200, timeout=None) print(ser) print(\"Connection established.\") except: print(\"Error in", "except: print(\"Error in trying to connect to serial port {}\".format(myPort)) stopLoop = False", "{:<15} {:<15} {:<15} {:<5}\".format(addr, d, dq, x, y, z, q)) if typeTLV ==", "not yet supported.\") return EXIT_FAILURE def printTLV(typeTLV, length, value): if typeTLV == TLV_TYPE_POS_XYZ:", "handle to the 
serial port EXIT_SUCCESS = 0 EXIT_FAILURE = 1 # API", "= 'little') # distance quality [x,y,z,q] = parsePOSvalue(value[offset+7:offset+20]) distances.append([addr, d, dq, x, y,", "q values x = int.from_bytes(value[0:4], byteorder='little') y = int.from_bytes(value[4:8], byteorder='little') z = int.from_bytes(value[8:12],", "the serial port EXIT_SUCCESS = 0 EXIT_FAILURE = 1 # API Error codes", "This function handles command lets the user specify the # name of the", "is position # 2.5 Error handling # 3. Output message # ---------- while", "TLV_TYPE_POS_XYZ = bytes.fromhex(\"41\") # Response position coordinates x,y,z with q # TLV_TYPE_RNG_AN_DIST =", "\"type\" byte of the response lengthTLV = ser.read(1) # Read the \"length\" byte", "# 2. Receive response. May get dummy bytes before real response. [typeTLV, length,", "to allow # position updates to be written to anchor nodes # Currently", "distances and positions def main(): global ser print(\"dwmPosGet started.\") myPort = myParser() #", "# Response: Ranging anchor distances and positions def main(): global ser print(\"dwmPosGet started.\")", "[Type, length] DWM_POS_SET = bytes.fromhex(\"01 0d\") # Used to set position. Follow with", "bytes.fromhex(\"00\") # 1: unknown command or broken TLV frame # 2: internal error", "Decawave for position # 2. 
Receive response, parsing as I go # --First", "= [] for i in range(num_distances): offset = i*13+1 addr = value[offset:offset+2].hex() #", "printTLV(typeTLV, length, value) # The following lines allow this script to run as", "not 2 bytes d = int.from_bytes(value[offset+8:offset+12], byteorder = 'little') dq = int.from_bytes(value[offset+12:offset+13], byteorder", "# Response codes TLV_TYPE_DUMMY = bytes.fromhex(\"00\") # Reserved for SPI dummy byte TLV_TYPE_POS_XYZ", "# TLV_TYPE_DUMMY = bytes.fromhex(\"00\") # Reserved for SPI dummy byte # TLV_TYPE_POS_XYZ =", "z, q)) if typeTLV == TLV_TYPE_RNG_AN_DIST: print(\"{:=<5} {:=<15} {:=<5}\".format('addr','d','dq')) [num_distances, distances] = parseTLV(typeTLV,", "distances.append([addr, d, dq]) return [num_distances, distances] if typeTLV == TLV_TYPE_RNG_AN_POS_DIST: num_distances = int.from_bytes(value[0:1],", "ROS network. # In the future, this script will be expanded to allow", "for next message global ser # The handle for the serial port connection", "the \"length\" byte of the response lengthTLV = int.from_bytes(lengthTLV, byteorder='little') valueTLV = ser.read(lengthTLV)", "and returns the # x, y, z, and q values x = int.from_bytes(value[0:4],", "'little') # distance dq = int.from_bytes(value[offset+6:offset+7], byteorder = 'little') # distance quality [x,y,z,q]", "Expect Position if length < 13: print(\"No position received. Flushing buffer.\") ser.reset_input_buffer() return", "sys import argparse defaultPortName = '/dev/ttyACM0' # On linux, you should use /dev/ttyACM0", "not permitted # API Commands [Type, length] DWM_POS_SET = bytes.fromhex(\"01 0d\") # Used", "# Response: Ranging anchor distances TLV_TYPE_RNG_AN_POS_DIST = bytes.fromhex(\"49\") # Response: Ranging anchor distances", "value [error code]. 
return [typeTLV, lengthTLV, valueTLV] def parsePOSvalue(value): # This helper function", "x = int.from_bytes(value[0:4], byteorder='little') y = int.from_bytes(value[4:8], byteorder='little') z = int.from_bytes(value[8:12], byteorder='little') q", "print(\"{:<5} {:<15} {:<5} {:<15} {:<15} {:<15} {:<5}\".format(addr, d, dq, x, y, z, q))", "txBuffer = request try: ser.reset_input_buffer() # Get rid of anything in the buffer", "[typeTLV, length, value] = receiveTLV() # Expect Position if length < 13: print(\"No", "Use command line arguments # to specify the name of the port (See", "= int.from_bytes(value[0:4], byteorder='little') y = int.from_bytes(value[4:8], byteorder='little') z = int.from_bytes(value[8:12], byteorder='little') q =", "use \"pip install pyserial\" if you have not already done so import time", "q] def parseTLV(typeTLV, length, value): # TLV_TYPE_DUMMY = bytes.fromhex(\"00\") # Reserved for SPI", "# TLV_TYPE_RNG_AN_DIST = bytes.fromhex(\"48\") # Response: Ranging anchor distances # TLV_TYPE_RNG_AN_POS_DIST = bytes.fromhex(\"49\")", "buffer that could confuse this script. ser.write(txBuffer) except: print(f\"Error during transmission of request", "defaultPortName + ' )' ) args = parser.parse_args() print(\"Using port:\", args.port) return args.port", "with DWM 1001 module through UART TLV interface # This script calls the", "00\") # Used to ask for position. 
DWM_LOC_GET = bytes.fromhex(\"0c 00\") # Request", "= bytes.fromhex(\"0c 00\") # Request for position + distances to anchors/tags # Response", "ser = serial.Serial(myPort, baudrate=115200, timeout=None) print(ser) print(\"Connection established.\") except: print(\"Error in trying to", "allow # position updates to be written to anchor nodes # Currently limited", "printTLV(typeTLV, length, value): if typeTLV == TLV_TYPE_POS_XYZ: print( \"{:_<15} {:_<15} {:_<15} {:_<5}\".format('x','y','z','q')) [x,y,z,q]", "written September 2019 by <NAME> # Intended for use with DWM 1001 module", "= parseTLV(typeTLV, length, value) for i in range(num_distances): [addr, d, dq, x, y,", "args = parser.parse_args() print(\"Using port:\", args.port) return args.port ser = None # This", "= argparse.ArgumentParser(description = 'get position info') # Script descript. parser.add_argument( '--port', default=defaultPortName, help='specify", "received. Flushing buffer.\") ser.reset_input_buffer() return EXIT_FAILURE else: printTLV(typeTLV, length, value) [typeTLV, length, value]", "= i*13+1 addr = value[offset:offset+2].hex() # UWB address d = int.from_bytes(value[offset+2:offset+6], byteorder =", "parsePOSvalue(value[offset+7:offset+20]) distances.append([addr, d, dq, x, y, z, q]) return [num_distances, distances] # Default", "return [num_distances, distances] # Default case: print(\"Error: attempted to parse TLV of type", "[typeTLV, length, value] = receiveTLV() # Expect Distances if length < 13: print(\"No", "ser print(\"dwmPosGet started.\") myPort = myParser() # Establish serial port connection try: ser", "ser.write(txBuffer) except: print(f\"Error during transmission of request {txBuffer.hex()}\") stopLoop = True return EXIT_FAILURE", "value) for i in range(num_distances): [addr, d, dq] = distances[i] print(\"{:<5} {:<15} {:<5}\".format(addr,", "may be received from an anchor node num_distances = int.from_bytes(value[0:1]) distances = []", "z, and q values x = int.from_bytes(value[0:4], 
byteorder='little') y = int.from_bytes(value[4:8], byteorder='little') z", "Response: Ranging anchor distances # TLV_TYPE_RNG_AN_POS_DIST = bytes.fromhex(\"49\") # Response: Ranging anchor distances", "< 13: print(\"No distances received\") else: printTLV(typeTLV, length, value) # The following lines", "I get the position [typeTLV, length, value] = receiveTLV() # Expect Position if", "= '/dev/ttyACM0' # On linux, you should use /dev/ttyACM0 # ON Windows, the", "return EXIT_FAILURE return EXIT_SUCCESS def receiveTLV(): # Listen for TLV response from Decawave", "specified in the # DWM1001 Firmware API Guide 5.3.10. # It parses the", "{:<5}\".format(x,y,z,q)) if typeTLV == TLV_TYPE_RNG_AN_POS_DIST: print(\"{:=<5} {:=<15} {:=<5} {:=<15} {:=<15} {:=<15} {:=<5}\".format('addr', 'd',", "except: print(f\"Error during transmission of request {txBuffer.hex()}\") stopLoop = True return EXIT_FAILURE return", "= int.from_bytes(value[12:13], byteorder='little') return [x, y, z, q] def parseTLV(typeTLV, length, value): #", "value) print(\"{:<15} {:<15} {:<15} {:<5}\".format(x,y,z,q)) if typeTLV == TLV_TYPE_RNG_AN_POS_DIST: print(\"{:=<5} {:=<15} {:=<5} {:=<15}", "UWB address d = int.from_bytes(value[offset+2:offset+6], byteorder = 'little') # distance dq = int.from_bytes(value[offset+6:offset+7],", "TLV_TYPE_DUMMY = bytes.fromhex(\"00\") # Reserved for SPI dummy byte TLV_TYPE_POS_XYZ = bytes.fromhex(\"41\") #", "# --First response is confirmation / error code # --Second response is position", "byte # TLV_TYPE_POS_XYZ = bytes.fromhex(\"41\") # Response position coordinates x,y,z with q #", "if typeTLV == TLV_TYPE_RNG_AN_DIST: print(\"{:=<5} {:=<15} {:=<5}\".format('addr','d','dq')) [num_distances, distances] = parseTLV(typeTLV, length, value)", "command lets the user specify the # name of the port to use", "{:_<5}\".format('x','y','z','q')) [x,y,z,q] = parseTLV(typeTLV, length, value) print(\"{:<15} {:<15} {:<15} {:<5}\".format(x,y,z,q)) if typeTLV ==", "y, z, q] = distances[i] 
print(\"{:<5} {:<15} {:<5} {:<15} {:<15} {:<15} {:<5}\".format(addr, d,", ")' ) args = parser.parse_args() print(\"Using port:\", args.port) return args.port ser = None", "print(f\"Error during transmission of request {txBuffer.hex()}\") stopLoop = True return EXIT_FAILURE return EXIT_SUCCESS", "parseTLV(typeTLV, length, value): # TLV_TYPE_DUMMY = bytes.fromhex(\"00\") # Reserved for SPI dummy byte", "[num_distances, distances] # Default case: print(\"Error: attempted to parse TLV of type not", "name of the port to use (default: ' + defaultPortName + ' )'", "bytes here, not 2 bytes d = int.from_bytes(value[offset+8:offset+12], byteorder = 'little') dq =", "allow this script to run as a program if called directly. if __name__", "byte TLV_TYPE_POS_XYZ = bytes.fromhex(\"41\") # Response position coordinates x,y,z with q TLV_TYPE_RNG_AN_DIST =", "command line arguments # to specify the name of the port (See myParser()", "bytes before real response. [typeTLV, length, value]= receiveTLV() if value != ERR_CODE_OK: print(\"Received", "of the response lengthTLV = ser.read(1) # Read the \"length\" byte of the", "unknown command or broken TLV frame # 2: internal error # 3: invalid", "value] = receiveTLV() # Expect Distances if length < 13: print(\"No distances received\")", "print(ser) print(\"Connection established.\") except: print(\"Error in trying to connect to serial port {}\".format(myPort))", "1: unknown command or broken TLV frame # 2: internal error # 3:", "Follow with position as 13 bytes DWM_POS_GET = bytes.fromhex(\"02 00\") # Used to", "coordinates x,y,z with q # TLV_TYPE_RNG_AN_DIST = bytes.fromhex(\"48\") # Response: Ranging anchor distances", "TLV response from Decawave DWM1001 module # Returns a list of [Type, Length,", "an error message. 
Flushing input buffer.\") print(value) ser.reset_input_buffer() return EXIT_FAILURE # ---------Now, I", "# Intended for use with DWM 1001 module through UART TLV interface #", "error # 3: invalid parameter # 4: busy # 5: operation not permitted", "[x,y,z,q] = parsePOSvalue(value[offset+7:offset+20]) distances.append([addr, d, dq, x, y, z, q]) return [num_distances, distances]", "and q values x = int.from_bytes(value[0:4], byteorder='little') y = int.from_bytes(value[4:8], byteorder='little') z =", "# It parses the information received to send over # the ROS network.", "def getLocations(): # 1. Ask Decawave for Position and distances temp = sendTLV(DWM_LOC_GET)", "received from an anchor node num_distances = int.from_bytes(value[0:1]) distances = [] for i", "parser.add_argument( '--port', default=defaultPortName, help='specify the name of the port to use (default: '", "return EXIT_FAILURE else: printTLV(typeTLV, length, value) [typeTLV, length, value] = receiveTLV() # Expect", "in range (num_distances): offset = i*13+1 addr = value[offset:offset+8].hex() # Note: Address size", "serial port connection try: ser = serial.Serial(myPort, baudrate=115200, timeout=None) print(ser) print(\"Connection established.\") except:", "EXIT_FAILURE def printTLV(typeTLV, length, value): if typeTLV == TLV_TYPE_POS_XYZ: print( \"{:_<15} {:_<15} {:_<15}", "int.from_bytes(value[4:8], byteorder='little') z = int.from_bytes(value[8:12], byteorder='little') q = int.from_bytes(value[12:13], byteorder='little') return [x, y,", "print(\"{:=<5} {:=<15} {:=<5}\".format('addr','d','dq')) [num_distances, distances] = parseTLV(typeTLV, length, value) for i in range(num_distances):", "--port=[name or number] parser = argparse.ArgumentParser(description = 'get position info') # Script descript.", "EXIT_FAILURE # ------------- # 2. Receive response. 
May get dummy bytes before real", "node num_distances = int.from_bytes(value[0:1]) distances = [] for i in range (num_distances): offset", "y, z, q] = parsePOSvalue(value) return [x, y, z, q] if typeTLV ==", "# 1: unknown command or broken TLV frame # 2: internal error #", "z, q] = distances[i] print(\"{:<5} {:<15} {:<5} {:<15} {:<15} {:<15} {:<5}\".format(addr, d, dq,", "= bytes.fromhex(\"00\") # Reserved for SPI dummy byte # TLV_TYPE_POS_XYZ = bytes.fromhex(\"41\") #", "= bytes.fromhex(\"49\") # Response: Ranging anchor distances and positions def main(): global ser", "distance dq = int.from_bytes(value[offset+6:offset+7], byteorder = 'little') # distance quality [x,y,z,q] = parsePOSvalue(value[offset+7:offset+20])", "byte of the response lengthTLV = ser.read(1) # Read the \"length\" byte of", "This script calls the dwm_loc_get API call as specified in the # DWM1001", "operation not permitted # API Commands [Type, length] DWM_POS_SET = bytes.fromhex(\"01 0d\") #", "line argument. # --port=[name or number] parser = argparse.ArgumentParser(description = 'get position info')", "parsePOSvalue(value) return [x, y, z, q] if typeTLV == TLV_TYPE_RNG_AN_DIST: # This code", "parseTLV(typeTLV, length, value) for i in range(num_distances): [addr, d, dq] = distances[i] print(\"{:<5}", "'little') distances.append([addr, d, dq]) return [num_distances, distances] if typeTLV == TLV_TYPE_RNG_AN_POS_DIST: num_distances =", "DWM 1001 module through UART TLV interface # This script calls the dwm_loc_get", "module through UART TLV interface # This script calls the dwm_loc_get API call", "' + defaultPortName + ' )' ) args = parser.parse_args() print(\"Using port:\", args.port)", "the handle to the serial port EXIT_SUCCESS = 0 EXIT_FAILURE = 1 #", "parseTLV(typeTLV, length, value) print(\"{:<15} {:<15} {:<15} {:<5}\".format(x,y,z,q)) if typeTLV == TLV_TYPE_RNG_AN_POS_DIST: print(\"{:=<5} {:=<15}", "TLV_TYPE_RNG_AN_DIST: print(\"{:=<5} {:=<15} {:=<5}\".format('addr','d','dq')) 
[num_distances, distances] = parseTLV(typeTLV, length, value) for i in", "ser.reset_input_buffer() return EXIT_FAILURE # ---------Now, I read until I get the position [typeTLV,", "Firmware API Guide 5.3.10. # It parses the information received to send over", "position coordinates x,y,z with q # TLV_TYPE_RNG_AN_DIST = bytes.fromhex(\"48\") # Response: Ranging anchor", "similar def myParser(): # This function handles command lets the user specify the", "name of the handle to the serial port EXIT_SUCCESS = 0 EXIT_FAILURE =", "--First response is confirmation / error code # --Second response is position #", "return args.port ser = None # This will be the name of the", "to run as a program if called directly. if __name__ == \"__main__\": main()", "function takes a 13-byte position code and returns the # x, y, z,", "Used to set position. Follow with position as 13 bytes DWM_POS_GET = bytes.fromhex(\"02", "= bytes.fromhex(\"00\") # 1: unknown command or broken TLV frame # 2: internal", "{:<5} {:<15} {:<15} {:<15} {:<5}\".format(addr, d, dq, x, y, z, q)) if typeTLV", "# to specify the name of the port (See myParser() function) import serial", "busy # 5: operation not permitted # API Commands [Type, length] DWM_POS_SET =", "bytes.fromhex(\"41\") # Response position coordinates x,y,z with q TLV_TYPE_RNG_AN_DIST = bytes.fromhex(\"48\") # Response:", "range(num_distances): [addr, d, dq, x, y, z, q] = distances[i] print(\"{:<5} {:<15} {:<5}", "if length < 13: print(\"No distances received\") else: printTLV(typeTLV, length, value) # The", "API call as specified in the # DWM1001 Firmware API Guide 5.3.10. #", "= int.from_bytes(value[4:8], byteorder='little') z = int.from_bytes(value[8:12], byteorder='little') q = int.from_bytes(value[12:13], byteorder='little') return [x,", "= distances[i] print(\"{:<5} {:<15} {:<5}\".format(addr, d, dq)) def getLocations(): # 1. 
Ask Decawave", "of the port (See myParser() function) import serial # use \"pip install pyserial\"", "# UWB address d = int.from_bytes(value[offset+2:offset+6], byteorder = 'little') # distance dq =", "with q # TLV_TYPE_RNG_AN_DIST = bytes.fromhex(\"48\") # Response: Ranging anchor distances # TLV_TYPE_RNG_AN_POS_DIST", "temp == EXIT_FAILURE: return EXIT_FAILURE # ------------- # 2. Receive response. May get", "== EXIT_FAILURE: return EXIT_FAILURE # ------------- # 2. Receive response. May get dummy", "Used to ask for position. DWM_LOC_GET = bytes.fromhex(\"0c 00\") # Request for position", "of [Type, Length, Value] # If it receives TLV_TYPE_DUMMY, it keeps listening for", "following lines allow this script to run as a program if called directly.", "Expect Distances if length < 13: print(\"No distances received\") else: printTLV(typeTLV, length, value)", "# This code may be received from an anchor node num_distances = int.from_bytes(value[0:1])", "getLocations(): # 1. Ask Decawave for Position and distances temp = sendTLV(DWM_LOC_GET) if", "specify the name of the port (See myParser() function) import serial # use", "use (default: ' + defaultPortName + ' )' ) args = parser.parse_args() print(\"Using", "# name of the port to use with a command line argument. #", "port to use (default: ' + defaultPortName + ' )' ) args =", "position info') # Script descript. 
parser.add_argument( '--port', default=defaultPortName, help='specify the name of the", "addr = value[offset:offset+2].hex() # UWB address d = int.from_bytes(value[offset+2:offset+6], byteorder = 'little') #", "2 bytes d = int.from_bytes(value[offset+8:offset+12], byteorder = 'little') dq = int.from_bytes(value[offset+12:offset+13], byteorder =", "input buffer.\") print(value) ser.reset_input_buffer() return EXIT_FAILURE # ---------Now, I read until I get", "the name of the port to use (default: ' + defaultPortName + '", "position as 13 bytes DWM_POS_GET = bytes.fromhex(\"02 00\") # Used to ask for", "13: print(\"No position received. Flushing buffer.\") ser.reset_input_buffer() return EXIT_FAILURE else: printTLV(typeTLV, length, value)", "length, value]= receiveTLV() if value != ERR_CODE_OK: print(\"Received an error message. Flushing input", "{:<15} {:<15} {:<5}\".format(addr, d, dq, x, y, z, q)) if typeTLV == TLV_TYPE_RNG_AN_DIST:", "not already done so import time import sys import argparse defaultPortName = '/dev/ttyACM0'", "real response. [typeTLV, length, value]= receiveTLV() if value != ERR_CODE_OK: print(\"Received an error", "DWM1001 Firmware API Guide 5.3.10. # It parses the information received to send", "or number] parser = argparse.ArgumentParser(description = 'get position info') # Script descript. parser.add_argument(", "May get dummy bytes before real response. [typeTLV, length, value]= receiveTLV() if value", "# 4: busy # 5: operation not permitted # API Commands [Type, length]", "Read the \"length\" byte of the response lengthTLV = int.from_bytes(lengthTLV, byteorder='little') valueTLV =", "before real response. [typeTLV, length, value]= receiveTLV() if value != ERR_CODE_OK: print(\"Received an", "to anchors/tags # Response codes TLV_TYPE_DUMMY = bytes.fromhex(\"00\") # Reserved for SPI dummy", "could confuse this script. 
ser.write(txBuffer) except: print(f\"Error during transmission of request {txBuffer.hex()}\") stopLoop", "[num_distances, distances] if typeTLV == TLV_TYPE_RNG_AN_POS_DIST: num_distances = int.from_bytes(value[0:1], byteorder = 'little') distances", "ser.read(lengthTLV) # Read the value [error code]. return [typeTLV, lengthTLV, valueTLV] def parsePOSvalue(value):", "position [typeTLV, length, value] = receiveTLV() # Expect Position if length < 13:", "temp = sendTLV(DWM_LOC_GET) if temp == EXIT_FAILURE: return EXIT_FAILURE # ------------- # 2.", "[Type, Length, Value] # If it receives TLV_TYPE_DUMMY, it keeps listening for next", "byteorder = 'little') distances = [] for i in range(num_distances): offset = i*13+1", "by <NAME> # Intended for use with DWM 1001 module through UART TLV", "Ranging anchor distances # TLV_TYPE_RNG_AN_POS_DIST = bytes.fromhex(\"49\") # Response: Ranging anchor distances and", "Error handling # 3. Output message # ---------- while stopLoop is False: getLocations()", "dwmDistances # Being written September 2019 by <NAME> # Intended for use with", "1 # API Error codes ERR_CODE_OK = bytes.fromhex(\"00\") # 1: unknown command or", "keeps listening for next message global ser # The handle for the serial", "= i*13+1 addr = value[offset:offset+8].hex() # Note: Address size is 8 bytes here,", "port name may be 'COM9' or similar def myParser(): # This function handles", "over # the ROS network. 
# In the future, this script will be", "and distances temp = sendTLV(DWM_LOC_GET) if temp == EXIT_FAILURE: return EXIT_FAILURE # -------------", "anchor distances and positions if typeTLV == TLV_TYPE_POS_XYZ: [x, y, z, q] =", "EXIT_SUCCESS = 0 EXIT_FAILURE = 1 # API Error codes ERR_CODE_OK = bytes.fromhex(\"00\")", "message global ser # The handle for the serial port connection typeTLV =", "Address size is 8 bytes here, not 2 bytes d = int.from_bytes(value[offset+8:offset+12], byteorder", "x, y, z, and q values x = int.from_bytes(value[0:4], byteorder='little') y = int.from_bytes(value[4:8],", "0 EXIT_FAILURE = 1 # API Error codes ERR_CODE_OK = bytes.fromhex(\"00\") # 1:", "== TLV_TYPE_POS_XYZ: [x, y, z, q] = parsePOSvalue(value) return [x, y, z, q]", "# 1. Ask Decawave for position # 2. Receive response, parsing as I", "= parsePOSvalue(value) return [x, y, z, q] if typeTLV == TLV_TYPE_RNG_AN_DIST: # This", "from an anchor node num_distances = int.from_bytes(value[0:1]) distances = [] for i in", "value): if typeTLV == TLV_TYPE_POS_XYZ: print( \"{:_<15} {:_<15} {:_<15} {:_<5}\".format('x','y','z','q')) [x,y,z,q] = parseTLV(typeTLV,", "x, y, z, q] = distances[i] print(\"{:<5} {:<15} {:<5} {:<15} {:<15} {:<15} {:<5}\".format(addr,", "distances[i] print(\"{:<5} {:<15} {:<5}\".format(addr, d, dq)) def getLocations(): # 1. Ask Decawave for", "received\") else: printTLV(typeTLV, length, value) # The following lines allow this script to", "bytes.fromhex(\"01 0d\") # Used to set position. 
Follow with position as 13 bytes", "it receives TLV_TYPE_DUMMY, it keeps listening for next message global ser # The", "bytes.fromhex(\"00\") # Reserved for SPI dummy byte # TLV_TYPE_POS_XYZ = bytes.fromhex(\"41\") # Response", "next message global ser # The handle for the serial port connection typeTLV", "if typeTLV == TLV_TYPE_RNG_AN_POS_DIST: num_distances = int.from_bytes(value[0:1], byteorder = 'little') distances = []", "print(\"{:=<5} {:=<15} {:=<5} {:=<15} {:=<15} {:=<15} {:=<5}\".format('addr', 'd', 'dq', 'x', 'y', 'z', 'q'))", "{:<15} {:<5}\".format(addr, d, dq, x, y, z, q)) if typeTLV == TLV_TYPE_RNG_AN_DIST: print(\"{:=<5}", "= int.from_bytes(value[offset+2:offset+6], byteorder = 'little') # distance dq = int.from_bytes(value[offset+6:offset+7], byteorder = 'little')", "# Listen for TLV response from Decawave DWM1001 module # Returns a list", "bytes.fromhex(\"48\") # Response: Ranging anchor distances # TLV_TYPE_RNG_AN_POS_DIST = bytes.fromhex(\"49\") # Response: Ranging", "q TLV_TYPE_RNG_AN_DIST = bytes.fromhex(\"48\") # Response: Ranging anchor distances TLV_TYPE_RNG_AN_POS_DIST = bytes.fromhex(\"49\") #", "value[offset:offset+8].hex() # Note: Address size is 8 bytes here, not 2 bytes d", "to specify the name of the port (See myParser() function) import serial #", "send over # the ROS network. # In the future, this script will", "myParser() function) import serial # use \"pip install pyserial\" if you have not", "bytes.fromhex(\"02 00\") # Used to ask for position. 
DWM_LOC_GET = bytes.fromhex(\"0c 00\") #", "print(\"Error: attempted to parse TLV of type not yet supported.\") return EXIT_FAILURE def", "'--port', default=defaultPortName, help='specify the name of the port to use (default: ' +", "ser # The handle for the serial port connection typeTLV = TLV_TYPE_DUMMY while", "= int.from_bytes(value[offset+8:offset+12], byteorder = 'little') dq = int.from_bytes(value[offset+12:offset+13], byteorder = 'little') distances.append([addr, d,", "of anything in the buffer that could confuse this script. ser.write(txBuffer) except: print(f\"Error", "int.from_bytes(value[offset+6:offset+7], byteorder = 'little') # distance quality [x,y,z,q] = parsePOSvalue(value[offset+7:offset+20]) distances.append([addr, d, dq,", "in the # DWM1001 Firmware API Guide 5.3.10. # It parses the information", "= receiveTLV() # Expect Position if length < 13: print(\"No position received. Flushing", "lines allow this script to run as a program if called directly. if", "this script to run as a program if called directly. if __name__ ==", "attempted to parse TLV of type not yet supported.\") return EXIT_FAILURE def printTLV(typeTLV,", "return EXIT_FAILURE # ------------- # 2. Receive response. May get dummy bytes before", "the serial port connection typeTLV = TLV_TYPE_DUMMY while (typeTLV == TLV_TYPE_DUMMY): typeTLV =", "response. 
[typeTLV, length, value]= receiveTLV() if value != ERR_CODE_OK: print(\"Received an error message.", "parser.parse_args() print(\"Using port:\", args.port) return args.port ser = None # This will be", "# Get rid of anything in the buffer that could confuse this script.", "distances] if typeTLV == TLV_TYPE_RNG_AN_POS_DIST: num_distances = int.from_bytes(value[0:1], byteorder = 'little') distances =", "# Response position coordinates x,y,z with q # TLV_TYPE_RNG_AN_DIST = bytes.fromhex(\"48\") # Response:", "address d = int.from_bytes(value[offset+2:offset+6], byteorder = 'little') # distance dq = int.from_bytes(value[offset+6:offset+7], byteorder", "[typeTLV, length, value]= receiveTLV() if value != ERR_CODE_OK: print(\"Received an error message. Flushing", "/ error code # --Second response is position # 2.5 Error handling #", "anchor node num_distances = int.from_bytes(value[0:1]) distances = [] for i in range (num_distances):", "distances to anchors/tags # Response codes TLV_TYPE_DUMMY = bytes.fromhex(\"00\") # Reserved for SPI", "= parser.parse_args() print(\"Using port:\", args.port) return args.port ser = None # This will", "# API Error codes ERR_CODE_OK = bytes.fromhex(\"00\") # 1: unknown command or broken", "# --Second response is position # 2.5 Error handling # 3. Output message", "typeTLV == TLV_TYPE_RNG_AN_POS_DIST: num_distances = int.from_bytes(value[0:1], byteorder = 'little') distances = [] for", "parse TLV of type not yet supported.\") return EXIT_FAILURE def printTLV(typeTLV, length, value):", "port EXIT_SUCCESS = 0 EXIT_FAILURE = 1 # API Error codes ERR_CODE_OK =", "value): # TLV_TYPE_DUMMY = bytes.fromhex(\"00\") # Reserved for SPI dummy byte # TLV_TYPE_POS_XYZ", "plan: # 1. Ask Decawave for position # 2. 
Receive response, parsing as", "'d', 'dq', 'x', 'y', 'z', 'q')) [num_distances, distances] = parseTLV(typeTLV, length, value) for", "distances] = parseTLV(typeTLV, length, value) for i in range(num_distances): [addr, d, dq] =", "Ask Decawave for Position and distances temp = sendTLV(DWM_LOC_GET) if temp == EXIT_FAILURE:", "return [num_distances, distances] if typeTLV == TLV_TYPE_RNG_AN_POS_DIST: num_distances = int.from_bytes(value[0:1], byteorder = 'little')", "'dq', 'x', 'y', 'z', 'q')) [num_distances, distances] = parseTLV(typeTLV, length, value) for i", "to anchor nodes # Currently limited to Python 3.6+. Use command line arguments", "helper function takes a 13-byte position code and returns the # x, y,", "{:=<15} {:=<15} {:=<5}\".format('addr', 'd', 'dq', 'x', 'y', 'z', 'q')) [num_distances, distances] = parseTLV(typeTLV,", "int.from_bytes(value[12:13], byteorder='little') return [x, y, z, q] def parseTLV(typeTLV, length, value): # TLV_TYPE_DUMMY", "# The handle for the serial port connection typeTLV = TLV_TYPE_DUMMY while (typeTLV", "the name of the handle to the serial port EXIT_SUCCESS = 0 EXIT_FAILURE", "# Reserved for SPI dummy byte TLV_TYPE_POS_XYZ = bytes.fromhex(\"41\") # Response position coordinates", "2. Receive response. May get dummy bytes before real response. [typeTLV, length, value]=", "= myParser() # Establish serial port connection try: ser = serial.Serial(myPort, baudrate=115200, timeout=None)", "module # Returns a list of [Type, Length, Value] # If it receives", "{:<15} {:<5} {:<15} {:<15} {:<15} {:<5}\".format(addr, d, dq, x, y, z, q)) if", "to connect to serial port {}\".format(myPort)) stopLoop = False # Loop plan: #", "handling # 3. 
Output message # ---------- while stopLoop is False: getLocations() def", "of the port to use (default: ' + defaultPortName + ' )' )", "listening for next message global ser # The handle for the serial port", "if typeTLV == TLV_TYPE_POS_XYZ: [x, y, z, q] = parsePOSvalue(value) return [x, y,", "dq = int.from_bytes(value[offset+12:offset+13], byteorder = 'little') distances.append([addr, d, dq]) return [num_distances, distances] if", "Flushing buffer.\") ser.reset_input_buffer() return EXIT_FAILURE else: printTLV(typeTLV, length, value) [typeTLV, length, value] =", "x, y, z, q)) if typeTLV == TLV_TYPE_RNG_AN_DIST: print(\"{:=<5} {:=<15} {:=<5}\".format('addr','d','dq')) [num_distances, distances]", "In the future, this script will be expanded to allow # position updates", "y, z, q]) return [num_distances, distances] # Default case: print(\"Error: attempted to parse", "as 13 bytes DWM_POS_GET = bytes.fromhex(\"02 00\") # Used to ask for position.", "ser = None # This will be the name of the handle to", "'z', 'q')) [num_distances, distances] = parseTLV(typeTLV, length, value) for i in range(num_distances): [addr,", "distances[i] print(\"{:<5} {:<15} {:<5} {:<15} {:<15} {:<15} {:<5}\".format(addr, d, dq, x, y, z,", "' )' ) args = parser.parse_args() print(\"Using port:\", args.port) return args.port ser =", "'COM9' or similar def myParser(): # This function handles command lets the user", "started.\") myPort = myParser() # Establish serial port connection try: ser = serial.Serial(myPort,", "a command line argument. 
# --port=[name or number] parser = argparse.ArgumentParser(description = 'get", "myParser(): # This function handles command lets the user specify the # name", "i*13+1 addr = value[offset:offset+8].hex() # Note: Address size is 8 bytes here, not", "API Error codes ERR_CODE_OK = bytes.fromhex(\"00\") # 1: unknown command or broken TLV", "Establish serial port connection try: ser = serial.Serial(myPort, baudrate=115200, timeout=None) print(ser) print(\"Connection established.\")", "the future, this script will be expanded to allow # position updates to", "bytes.fromhex(\"00\") # Reserved for SPI dummy byte TLV_TYPE_POS_XYZ = bytes.fromhex(\"41\") # Response position", "== TLV_TYPE_RNG_AN_POS_DIST: print(\"{:=<5} {:=<15} {:=<5} {:=<15} {:=<15} {:=<15} {:=<5}\".format('addr', 'd', 'dq', 'x', 'y',", "# Being written September 2019 by <NAME> # Intended for use with DWM", "num_distances = int.from_bytes(value[0:1], byteorder = 'little') distances = [] for i in range(num_distances):", "# Request for position + distances to anchors/tags # Response codes TLV_TYPE_DUMMY =", "q = int.from_bytes(value[12:13], byteorder='little') return [x, y, z, q] def parseTLV(typeTLV, length, value):", "to use with a command line argument. # --port=[name or number] parser =", "done so import time import sys import argparse defaultPortName = '/dev/ttyACM0' # On", "= sendTLV(DWM_LOC_GET) if temp == EXIT_FAILURE: return EXIT_FAILURE # ------------- # 2. Receive", "ON Windows, the port name may be 'COM9' or similar def myParser(): #", "{}\".format(myPort)) stopLoop = False # Loop plan: # 1. 
Ask Decawave for position", "On linux, you should use /dev/ttyACM0 # ON Windows, the port name may", "for Position and distances temp = sendTLV(DWM_LOC_GET) if temp == EXIT_FAILURE: return EXIT_FAILURE", "try: ser.reset_input_buffer() # Get rid of anything in the buffer that could confuse", "Listen for TLV response from Decawave DWM1001 module # Returns a list of", "8 bytes here, not 2 bytes d = int.from_bytes(value[offset+8:offset+12], byteorder = 'little') dq", "{:<5}\".format(addr, d, dq)) def getLocations(): # 1. Ask Decawave for Position and distances", "anchor nodes # Currently limited to Python 3.6+. Use command line arguments #", "information received to send over # the ROS network. # In the future,", "False # Loop plan: # 1. Ask Decawave for position # 2. Receive", "distance quality [x,y,z,q] = parsePOSvalue(value[offset+7:offset+20]) distances.append([addr, d, dq, x, y, z, q]) return", "typeTLV = ser.read(1) # Read the \"type\" byte of the response lengthTLV =", "ser.read(1) # Read the \"length\" byte of the response lengthTLV = int.from_bytes(lengthTLV, byteorder='little')", "to ask for position. DWM_LOC_GET = bytes.fromhex(\"0c 00\") # Request for position +", "byte of the response lengthTLV = int.from_bytes(lengthTLV, byteorder='little') valueTLV = ser.read(lengthTLV) # Read", "TLV_TYPE_RNG_AN_DIST = bytes.fromhex(\"48\") # Response: Ranging anchor distances TLV_TYPE_RNG_AN_POS_DIST = bytes.fromhex(\"49\") # Response:", "= value[offset:offset+2].hex() # UWB address d = int.from_bytes(value[offset+2:offset+6], byteorder = 'little') # distance", "in range(num_distances): [addr, d, dq, x, y, z, q] = distances[i] print(\"{:<5} {:<15}", "dq)) def getLocations(): # 1. Ask Decawave for Position and distances temp =", "value]= receiveTLV() if value != ERR_CODE_OK: print(\"Received an error message. 
Flushing input buffer.\")", "def parsePOSvalue(value): # This helper function takes a 13-byte position code and returns", "Value] # If it receives TLV_TYPE_DUMMY, it keeps listening for next message global", "{:=<15} {:=<5}\".format('addr','d','dq')) [num_distances, distances] = parseTLV(typeTLV, length, value) for i in range(num_distances): [addr,", "= bytes.fromhex(\"48\") # Response: Ranging anchor distances # TLV_TYPE_RNG_AN_POS_DIST = bytes.fromhex(\"49\") # Response:", "13: print(\"No distances received\") else: printTLV(typeTLV, length, value) # The following lines allow", "# DWM1001 Firmware API Guide 5.3.10. # It parses the information received to", "sendTLV(DWM_LOC_GET) if temp == EXIT_FAILURE: return EXIT_FAILURE # ------------- # 2. Receive response.", "Ask Decawave for position # 2. Receive response, parsing as I go #", "length, value) for i in range(num_distances): [addr, d, dq, x, y, z, q]", "lengthTLV = int.from_bytes(lengthTLV, byteorder='little') valueTLV = ser.read(lengthTLV) # Read the value [error code].", "port:\", args.port) return args.port ser = None # This will be the name", "This helper function takes a 13-byte position code and returns the # x,", "# ON Windows, the port name may be 'COM9' or similar def myParser():", "port to use with a command line argument. # --port=[name or number] parser", "command or broken TLV frame # 2: internal error # 3: invalid parameter", "the buffer that could confuse this script. ser.write(txBuffer) except: print(f\"Error during transmission of", "ask for position. DWM_LOC_GET = bytes.fromhex(\"0c 00\") # Request for position + distances", "ser.reset_input_buffer() return EXIT_FAILURE else: printTLV(typeTLV, length, value) [typeTLV, length, value] = receiveTLV() #", "Python 3.6+. Use command line arguments # to specify the name of the", "print(\"Received an error message. 
Flushing input buffer.\") print(value) ser.reset_input_buffer() return EXIT_FAILURE # ---------Now,", "# This script calls the dwm_loc_get API call as specified in the #", "get the position [typeTLV, length, value] = receiveTLV() # Expect Position if length", "DWM_POS_SET = bytes.fromhex(\"01 0d\") # Used to set position. Follow with position as", "[typeTLV, lengthTLV, valueTLV] def parsePOSvalue(value): # This helper function takes a 13-byte position", "code # --Second response is position # 2.5 Error handling # 3. Output", "parseTLV(typeTLV, length, value) for i in range(num_distances): [addr, d, dq, x, y, z,", "typeTLV == TLV_TYPE_RNG_AN_POS_DIST: print(\"{:=<5} {:=<15} {:=<5} {:=<15} {:=<15} {:=<15} {:=<5}\".format('addr', 'd', 'dq', 'x',", "# Script descript. parser.add_argument( '--port', default=defaultPortName, help='specify the name of the port to", "handles command lets the user specify the # name of the port to", "# ---------Now, I read until I get the position [typeTLV, length, value] =", "d, dq] = distances[i] print(\"{:<5} {:<15} {:<5}\".format(addr, d, dq)) def getLocations(): # 1.", "= [] for i in range (num_distances): offset = i*13+1 addr = value[offset:offset+8].hex()", "receiveTLV() # Expect Position if length < 13: print(\"No position received. Flushing buffer.\")", "== TLV_TYPE_RNG_AN_DIST: # This code may be received from an anchor node num_distances", "an anchor node num_distances = int.from_bytes(value[0:1]) distances = [] for i in range", "ser.reset_input_buffer() # Get rid of anything in the buffer that could confuse this", "5.3.10. 
# It parses the information received to send over # the ROS", "Ranging anchor distances and positions if typeTLV == TLV_TYPE_POS_XYZ: [x, y, z, q]", "= int.from_bytes(value[offset+6:offset+7], byteorder = 'little') # distance quality [x,y,z,q] = parsePOSvalue(value[offset+7:offset+20]) distances.append([addr, d,", "to parse TLV of type not yet supported.\") return EXIT_FAILURE def printTLV(typeTLV, length,", "# In the future, this script will be expanded to allow # position", "[x, y, z, q] def parseTLV(typeTLV, length, value): # TLV_TYPE_DUMMY = bytes.fromhex(\"00\") #", "I read until I get the position [typeTLV, length, value] = receiveTLV() #", "defaultPortName = '/dev/ttyACM0' # On linux, you should use /dev/ttyACM0 # ON Windows,", "distances] # Default case: print(\"Error: attempted to parse TLV of type not yet", "distances # TLV_TYPE_RNG_AN_POS_DIST = bytes.fromhex(\"49\") # Response: Ranging anchor distances and positions if", "serial # use \"pip install pyserial\" if you have not already done so", "byteorder='little') y = int.from_bytes(value[4:8], byteorder='little') z = int.from_bytes(value[8:12], byteorder='little') q = int.from_bytes(value[12:13], byteorder='little')", "parser = argparse.ArgumentParser(description = 'get position info') # Script descript. 
parser.add_argument( '--port', default=defaultPortName,", "True return EXIT_FAILURE return EXIT_SUCCESS def receiveTLV(): # Listen for TLV response from", "Returns a list of [Type, Length, Value] # If it receives TLV_TYPE_DUMMY, it", "may be 'COM9' or similar def myParser(): # This function handles command lets", "install pyserial\" if you have not already done so import time import sys", "# 5: operation not permitted # API Commands [Type, length] DWM_POS_SET = bytes.fromhex(\"01", "Response position coordinates x,y,z with q # TLV_TYPE_RNG_AN_DIST = bytes.fromhex(\"48\") # Response: Ranging", "{:=<15} {:=<5}\".format('addr', 'd', 'dq', 'x', 'y', 'z', 'q')) [num_distances, distances] = parseTLV(typeTLV, length,", "{txBuffer.hex()}\") stopLoop = True return EXIT_FAILURE return EXIT_SUCCESS def receiveTLV(): # Listen for", "to send over # the ROS network. # In the future, this script", "number] parser = argparse.ArgumentParser(description = 'get position info') # Script descript. parser.add_argument( '--port',", "= int.from_bytes(value[0:1], byteorder = 'little') distances = [] for i in range(num_distances): offset", "connect to serial port {}\".format(myPort)) stopLoop = False # Loop plan: # 1.", "# 2. 
Receive response, parsing as I go # --First response is confirmation", "be received from an anchor node num_distances = int.from_bytes(value[0:1]) distances = [] for", "== TLV_TYPE_RNG_AN_POS_DIST: num_distances = int.from_bytes(value[0:1], byteorder = 'little') distances = [] for i", "length, value] = receiveTLV() # Expect Position if length < 13: print(\"No position", "length < 13: print(\"No distances received\") else: printTLV(typeTLV, length, value) # The following", "+ distances to anchors/tags # Response codes TLV_TYPE_DUMMY = bytes.fromhex(\"00\") # Reserved for", "TLV_TYPE_POS_XYZ = bytes.fromhex(\"41\") # Response position coordinates x,y,z with q TLV_TYPE_RNG_AN_DIST = bytes.fromhex(\"48\")", "specify the # name of the port to use with a command line", "to use (default: ' + defaultPortName + ' )' ) args = parser.parse_args()", "TLV_TYPE_DUMMY while (typeTLV == TLV_TYPE_DUMMY): typeTLV = ser.read(1) # Read the \"type\" byte", "# 2.5 Error handling # 3. Output message # ---------- while stopLoop is", "int.from_bytes(value[0:4], byteorder='little') y = int.from_bytes(value[4:8], byteorder='little') z = int.from_bytes(value[8:12], byteorder='little') q = int.from_bytes(value[12:13],", "{:=<5} {:=<15} {:=<15} {:=<15} {:=<5}\".format('addr', 'd', 'dq', 'x', 'y', 'z', 'q')) [num_distances, distances]", "port {}\".format(myPort)) stopLoop = False # Loop plan: # 1. Ask Decawave for", "transmission of request {txBuffer.hex()}\") stopLoop = True return EXIT_FAILURE return EXIT_SUCCESS def receiveTLV():", "2019 by <NAME> # Intended for use with DWM 1001 module through UART", "= bytes.fromhex(\"41\") # Response position coordinates x,y,z with q TLV_TYPE_RNG_AN_DIST = bytes.fromhex(\"48\") #", "position received. 
Flushing buffer.\") ser.reset_input_buffer() return EXIT_FAILURE else: printTLV(typeTLV, length, value) [typeTLV, length,", "global ser txBuffer = request try: ser.reset_input_buffer() # Get rid of anything in", "TLV of type not yet supported.\") return EXIT_FAILURE def printTLV(typeTLV, length, value): if", "print(\"Error in trying to connect to serial port {}\".format(myPort)) stopLoop = False #", "getLocations() def sendTLV(request): global ser txBuffer = request try: ser.reset_input_buffer() # Get rid", "# If it receives TLV_TYPE_DUMMY, it keeps listening for next message global ser", "request try: ser.reset_input_buffer() # Get rid of anything in the buffer that could", "ser txBuffer = request try: ser.reset_input_buffer() # Get rid of anything in the", "handle for the serial port connection typeTLV = TLV_TYPE_DUMMY while (typeTLV == TLV_TYPE_DUMMY):", "read until I get the position [typeTLV, length, value] = receiveTLV() # Expect", "= int.from_bytes(value[offset+12:offset+13], byteorder = 'little') distances.append([addr, d, dq]) return [num_distances, distances] if typeTLV", "This code may be received from an anchor node num_distances = int.from_bytes(value[0:1]) distances", "positions def main(): global ser print(\"dwmPosGet started.\") myPort = myParser() # Establish serial", "= int.from_bytes(value[0:1]) distances = [] for i in range (num_distances): offset = i*13+1", "is False: getLocations() def sendTLV(request): global ser txBuffer = request try: ser.reset_input_buffer() #", "dq, x, y, z, q)) if typeTLV == TLV_TYPE_RNG_AN_DIST: print(\"{:=<5} {:=<15} {:=<5}\".format('addr','d','dq')) [num_distances,", "use with a command line argument. 
# --port=[name or number] parser = argparse.ArgumentParser(description", "i*13+1 addr = value[offset:offset+2].hex() # UWB address d = int.from_bytes(value[offset+2:offset+6], byteorder = 'little')", "q] if typeTLV == TLV_TYPE_RNG_AN_DIST: # This code may be received from an", "Reserved for SPI dummy byte TLV_TYPE_POS_XYZ = bytes.fromhex(\"41\") # Response position coordinates x,y,z", "receiveTLV() if value != ERR_CODE_OK: print(\"Received an error message. Flushing input buffer.\") print(value)", "print(\"No distances received\") else: printTLV(typeTLV, length, value) # The following lines allow this", "if temp == EXIT_FAILURE: return EXIT_FAILURE # ------------- # 2. Receive response. May", "# 3: invalid parameter # 4: busy # 5: operation not permitted #", "d, dq, x, y, z, q]) return [num_distances, distances] # Default case: print(\"Error:", "z, q]) return [num_distances, distances] # Default case: print(\"Error: attempted to parse TLV", "d, dq]) return [num_distances, distances] if typeTLV == TLV_TYPE_RNG_AN_POS_DIST: num_distances = int.from_bytes(value[0:1], byteorder", "broken TLV frame # 2: internal error # 3: invalid parameter # 4:", "distances temp = sendTLV(DWM_LOC_GET) if temp == EXIT_FAILURE: return EXIT_FAILURE # ------------- #", "bytes.fromhex(\"41\") # Response position coordinates x,y,z with q # TLV_TYPE_RNG_AN_DIST = bytes.fromhex(\"48\") #", "== TLV_TYPE_POS_XYZ: print( \"{:_<15} {:_<15} {:_<15} {:_<5}\".format('x','y','z','q')) [x,y,z,q] = parseTLV(typeTLV, length, value) print(\"{:<15}", "The handle for the serial port connection typeTLV = TLV_TYPE_DUMMY while (typeTLV ==", "rid of anything in the buffer that could confuse this script. ser.write(txBuffer) except:", "def sendTLV(request): global ser txBuffer = request try: ser.reset_input_buffer() # Get rid of", "length, value] = receiveTLV() # Expect Distances if length < 13: print(\"No distances", "[error code]. 
return [typeTLV, lengthTLV, valueTLV] def parsePOSvalue(value): # This helper function takes", "bytes.fromhex(\"0c 00\") # Request for position + distances to anchors/tags # Response codes", "future, this script will be expanded to allow # position updates to be", "script calls the dwm_loc_get API call as specified in the # DWM1001 Firmware", "= 0 EXIT_FAILURE = 1 # API Error codes ERR_CODE_OK = bytes.fromhex(\"00\") #", "bytes.fromhex(\"49\") # Response: Ranging anchor distances and positions if typeTLV == TLV_TYPE_POS_XYZ: [x,", "TLV_TYPE_POS_XYZ: print( \"{:_<15} {:_<15} {:_<15} {:_<5}\".format('x','y','z','q')) [x,y,z,q] = parseTLV(typeTLV, length, value) print(\"{:<15} {:<15}", "ERR_CODE_OK: print(\"Received an error message. Flushing input buffer.\") print(value) ser.reset_input_buffer() return EXIT_FAILURE #", "d, dq, x, y, z, q] = distances[i] print(\"{:<5} {:<15} {:<5} {:<15} {:<15}", "# Expect Distances if length < 13: print(\"No distances received\") else: printTLV(typeTLV, length,", "i in range(num_distances): [addr, d, dq] = distances[i] print(\"{:<5} {:<15} {:<5}\".format(addr, d, dq))", "of the port to use with a command line argument. # --port=[name or", "dummy byte # TLV_TYPE_POS_XYZ = bytes.fromhex(\"41\") # Response position coordinates x,y,z with q", "# Note: Address size is 8 bytes here, not 2 bytes d =", "# Response: Ranging anchor distances and positions if typeTLV == TLV_TYPE_POS_XYZ: [x, y,", "length, value) print(\"{:<15} {:<15} {:<15} {:<5}\".format(x,y,z,q)) if typeTLV == TLV_TYPE_RNG_AN_POS_DIST: print(\"{:=<5} {:=<15} {:=<5}", "= 1 # API Error codes ERR_CODE_OK = bytes.fromhex(\"00\") # 1: unknown command", "= bytes.fromhex(\"01 0d\") # Used to set position. 
Follow with position as 13", "message # ---------- while stopLoop is False: getLocations() def sendTLV(request): global ser txBuffer", "dummy byte TLV_TYPE_POS_XYZ = bytes.fromhex(\"41\") # Response position coordinates x,y,z with q TLV_TYPE_RNG_AN_DIST", "int.from_bytes(value[0:1]) distances = [] for i in range (num_distances): offset = i*13+1 addr", "print( \"{:_<15} {:_<15} {:_<15} {:_<5}\".format('x','y','z','q')) [x,y,z,q] = parseTLV(typeTLV, length, value) print(\"{:<15} {:<15} {:<15}", "use with DWM 1001 module through UART TLV interface # This script calls", "port connection try: ser = serial.Serial(myPort, baudrate=115200, timeout=None) print(ser) print(\"Connection established.\") except: print(\"Error", "connection typeTLV = TLV_TYPE_DUMMY while (typeTLV == TLV_TYPE_DUMMY): typeTLV = ser.read(1) # Read", "# Loop plan: # 1. Ask Decawave for position # 2. Receive response,", "value) for i in range(num_distances): [addr, d, dq, x, y, z, q] =", "Ranging anchor distances and positions def main(): global ser print(\"dwmPosGet started.\") myPort =", "Decawave DWM1001 module # Returns a list of [Type, Length, Value] # If", "length, value): if typeTLV == TLV_TYPE_POS_XYZ: print( \"{:_<15} {:_<15} {:_<15} {:_<5}\".format('x','y','z','q')) [x,y,z,q] =", "length, value) for i in range(num_distances): [addr, d, dq] = distances[i] print(\"{:<5} {:<15}", "y, z, q)) if typeTLV == TLV_TYPE_RNG_AN_DIST: print(\"{:=<5} {:=<15} {:=<5}\".format('addr','d','dq')) [num_distances, distances] =", "of request {txBuffer.hex()}\") stopLoop = True return EXIT_FAILURE return EXIT_SUCCESS def receiveTLV(): #", "DWM_LOC_GET = bytes.fromhex(\"0c 00\") # Request for position + distances to anchors/tags #", "returns the # x, y, z, and q values x = int.from_bytes(value[0:4], byteorder='little')", "help='specify the name of the port to use (default: ' + defaultPortName +", "for position. DWM_LOC_GET = bytes.fromhex(\"0c 00\") # Request for position + distances to", "# the ROS network. 
# In the future, this script will be expanded", "= bytes.fromhex(\"49\") # Response: Ranging anchor distances and positions if typeTLV == TLV_TYPE_POS_XYZ:", "length, value) # The following lines allow this script to run as a", "{:=<5}\".format('addr','d','dq')) [num_distances, distances] = parseTLV(typeTLV, length, value) for i in range(num_distances): [addr, d,", "byteorder='little') z = int.from_bytes(value[8:12], byteorder='little') q = int.from_bytes(value[12:13], byteorder='little') return [x, y, z,", "error message. Flushing input buffer.\") print(value) ser.reset_input_buffer() return EXIT_FAILURE # ---------Now, I read", "TLV_TYPE_DUMMY): typeTLV = ser.read(1) # Read the \"type\" byte of the response lengthTLV", "of the response lengthTLV = int.from_bytes(lengthTLV, byteorder='little') valueTLV = ser.read(lengthTLV) # Read the", "if value != ERR_CODE_OK: print(\"Received an error message. Flushing input buffer.\") print(value) ser.reset_input_buffer()", "that could confuse this script. ser.write(txBuffer) except: print(f\"Error during transmission of request {txBuffer.hex()}\")", "list of [Type, Length, Value] # If it receives TLV_TYPE_DUMMY, it keeps listening", "TLV_TYPE_DUMMY = bytes.fromhex(\"00\") # Reserved for SPI dummy byte # TLV_TYPE_POS_XYZ = bytes.fromhex(\"41\")", "'little') distances = [] for i in range(num_distances): offset = i*13+1 addr =", "and positions if typeTLV == TLV_TYPE_POS_XYZ: [x, y, z, q] = parsePOSvalue(value) return", "# Establish serial port connection try: ser = serial.Serial(myPort, baudrate=115200, timeout=None) print(ser) print(\"Connection", "range(num_distances): offset = i*13+1 addr = value[offset:offset+2].hex() # UWB address d = int.from_bytes(value[offset+2:offset+6],", "Intended for use with DWM 1001 module through UART TLV interface # This", "command line argument. 
# --port=[name or number] parser = argparse.ArgumentParser(description = 'get position", "z, q] if typeTLV == TLV_TYPE_RNG_AN_DIST: # This code may be received from", "timeout=None) print(ser) print(\"Connection established.\") except: print(\"Error in trying to connect to serial port", "return EXIT_FAILURE # ---------Now, I read until I get the position [typeTLV, length,", "main(): global ser print(\"dwmPosGet started.\") myPort = myParser() # Establish serial port connection", "x,y,z with q # TLV_TYPE_RNG_AN_DIST = bytes.fromhex(\"48\") # Response: Ranging anchor distances #", "byteorder = 'little') # distance dq = int.from_bytes(value[offset+6:offset+7], byteorder = 'little') # distance", "the response lengthTLV = ser.read(1) # Read the \"length\" byte of the response", "for position # 2. Receive response, parsing as I go # --First response", "position code and returns the # x, y, z, and q values x", "in range(num_distances): [addr, d, dq] = distances[i] print(\"{:<5} {:<15} {:<5}\".format(addr, d, dq)) def", "print(value) ser.reset_input_buffer() return EXIT_FAILURE # ---------Now, I read until I get the position", "else: printTLV(typeTLV, length, value) # The following lines allow this script to run", "print(\"{:<5} {:<15} {:<5}\".format(addr, d, dq)) def getLocations(): # 1. Ask Decawave for Position", "the # DWM1001 Firmware API Guide 5.3.10. # It parses the information received", "received to send over # the ROS network. # In the future, this", "--Second response is position # 2.5 Error handling # 3. Output message #", "if length < 13: print(\"No position received. Flushing buffer.\") ser.reset_input_buffer() return EXIT_FAILURE else:", "position # 2. 
Receive response, parsing as I go # --First response is", "== TLV_TYPE_DUMMY): typeTLV = ser.read(1) # Read the \"type\" byte of the response", "with position as 13 bytes DWM_POS_GET = bytes.fromhex(\"02 00\") # Used to ask", "\"{:_<15} {:_<15} {:_<15} {:_<5}\".format('x','y','z','q')) [x,y,z,q] = parseTLV(typeTLV, length, value) print(\"{:<15} {:<15} {:<15} {:<5}\".format(x,y,z,q))", "import time import sys import argparse defaultPortName = '/dev/ttyACM0' # On linux, you", "# Used to set position. Follow with position as 13 bytes DWM_POS_GET =", "import sys import argparse defaultPortName = '/dev/ttyACM0' # On linux, you should use", "# Reserved for SPI dummy byte # TLV_TYPE_POS_XYZ = bytes.fromhex(\"41\") # Response position", "the information received to send over # the ROS network. # In the", "anchor distances and positions def main(): global ser print(\"dwmPosGet started.\") myPort = myParser()", "= 'little') dq = int.from_bytes(value[offset+12:offset+13], byteorder = 'little') distances.append([addr, d, dq]) return [num_distances,", "def myParser(): # This function handles command lets the user specify the #", "the # name of the port to use with a command line argument.", "dwm_loc_get API call as specified in the # DWM1001 Firmware API Guide 5.3.10.", "The following lines allow this script to run as a program if called", "Output message # ---------- while stopLoop is False: getLocations() def sendTLV(request): global ser", "SPI dummy byte # TLV_TYPE_POS_XYZ = bytes.fromhex(\"41\") # Response position coordinates x,y,z with", "x, y, z, q]) return [num_distances, distances] # Default case: print(\"Error: attempted to", "DWM1001 module # Returns a list of [Type, Length, Value] # If it", "request {txBuffer.hex()}\") stopLoop = True return EXIT_FAILURE return EXIT_SUCCESS def receiveTLV(): # Listen", "offset = i*13+1 addr = value[offset:offset+8].hex() # Note: Address size is 8 bytes", "the port name may be 'COM9' or similar def myParser(): # This function", "q]) 
return [num_distances, distances] # Default case: print(\"Error: attempted to parse TLV of", "value) # The following lines allow this script to run as a program", "I go # --First response is confirmation / error code # --Second response", "EXIT_FAILURE: return EXIT_FAILURE # ------------- # 2. Receive response. May get dummy bytes", "name of the port to use with a command line argument. # --port=[name", "= parseTLV(typeTLV, length, value) print(\"{:<15} {:<15} {:<15} {:<5}\".format(x,y,z,q)) if typeTLV == TLV_TYPE_RNG_AN_POS_DIST: print(\"{:=<5}", "lengthTLV, valueTLV] def parsePOSvalue(value): # This helper function takes a 13-byte position code", "parsePOSvalue(value): # This helper function takes a 13-byte position code and returns the", "Error codes ERR_CODE_OK = bytes.fromhex(\"00\") # 1: unknown command or broken TLV frame", "/dev/ttyACM0 # ON Windows, the port name may be 'COM9' or similar def", "while stopLoop is False: getLocations() def sendTLV(request): global ser txBuffer = request try:", "dq, x, y, z, q]) return [num_distances, distances] # Default case: print(\"Error: attempted", "{:<15} {:<5}\".format(x,y,z,q)) if typeTLV == TLV_TYPE_RNG_AN_POS_DIST: print(\"{:=<5} {:=<15} {:=<5} {:=<15} {:=<15} {:=<15} {:=<5}\".format('addr',", "for position + distances to anchors/tags # Response codes TLV_TYPE_DUMMY = bytes.fromhex(\"00\") #", "anchors/tags # Response codes TLV_TYPE_DUMMY = bytes.fromhex(\"00\") # Reserved for SPI dummy byte", "user specify the # name of the port to use with a command", "int.from_bytes(value[offset+8:offset+12], byteorder = 'little') dq = int.from_bytes(value[offset+12:offset+13], byteorder = 'little') distances.append([addr, d, dq])", "return EXIT_FAILURE def printTLV(typeTLV, length, value): if typeTLV == TLV_TYPE_POS_XYZ: print( \"{:_<15} {:_<15}", "# TLV_TYPE_RNG_AN_POS_DIST = bytes.fromhex(\"49\") # Response: Ranging anchor distances and positions if typeTLV", "position + distances to anchors/tags # Response codes 
TLV_TYPE_DUMMY = bytes.fromhex(\"00\") # Reserved", "anchor distances # TLV_TYPE_RNG_AN_POS_DIST = bytes.fromhex(\"49\") # Response: Ranging anchor distances and positions", "EXIT_FAILURE return EXIT_SUCCESS def receiveTLV(): # Listen for TLV response from Decawave DWM1001", "supported.\") return EXIT_FAILURE def printTLV(typeTLV, length, value): if typeTLV == TLV_TYPE_POS_XYZ: print( \"{:_<15}", "lengthTLV = ser.read(1) # Read the \"length\" byte of the response lengthTLV =", "this script will be expanded to allow # position updates to be written", "dq] = distances[i] print(\"{:<5} {:<15} {:<5}\".format(addr, d, dq)) def getLocations(): # 1. Ask", "value != ERR_CODE_OK: print(\"Received an error message. Flushing input buffer.\") print(value) ser.reset_input_buffer() return", "{:<5}\".format(addr, d, dq, x, y, z, q)) if typeTLV == TLV_TYPE_RNG_AN_DIST: print(\"{:=<5} {:=<15}", "Response: Ranging anchor distances TLV_TYPE_RNG_AN_POS_DIST = bytes.fromhex(\"49\") # Response: Ranging anchor distances and", "False: getLocations() def sendTLV(request): global ser txBuffer = request try: ser.reset_input_buffer() # Get", "z = int.from_bytes(value[8:12], byteorder='little') q = int.from_bytes(value[12:13], byteorder='little') return [x, y, z, q]", "pyserial\" if you have not already done so import time import sys import", "with a command line argument. # --port=[name or number] parser = argparse.ArgumentParser(description =", "\"length\" byte of the response lengthTLV = int.from_bytes(lengthTLV, byteorder='little') valueTLV = ser.read(lengthTLV) #", "function) import serial # use \"pip install pyserial\" if you have not already", "= request try: ser.reset_input_buffer() # Get rid of anything in the buffer that", "'/dev/ttyACM0' # On linux, you should use /dev/ttyACM0 # ON Windows, the port", "13 bytes DWM_POS_GET = bytes.fromhex(\"02 00\") # Used to ask for position. 
DWM_LOC_GET", "if typeTLV == TLV_TYPE_RNG_AN_DIST: # This code may be received from an anchor", "print(\"Connection established.\") except: print(\"Error in trying to connect to serial port {}\".format(myPort)) stopLoop", "This will be the name of the handle to the serial port EXIT_SUCCESS", "# Used to ask for position. DWM_LOC_GET = bytes.fromhex(\"0c 00\") # Request for", "{:_<15} {:_<15} {:_<5}\".format('x','y','z','q')) [x,y,z,q] = parseTLV(typeTLV, length, value) print(\"{:<15} {:<15} {:<15} {:<5}\".format(x,y,z,q)) if", "takes a 13-byte position code and returns the # x, y, z, and", "the user specify the # name of the port to use with a", "name may be 'COM9' or similar def myParser(): # This function handles command", "argument. # --port=[name or number] parser = argparse.ArgumentParser(description = 'get position info') #", "4: busy # 5: operation not permitted # API Commands [Type, length] DWM_POS_SET", "position # 2.5 Error handling # 3. Output message # ---------- while stopLoop", "for i in range (num_distances): offset = i*13+1 addr = value[offset:offset+8].hex() # Note:", "response is position # 2.5 Error handling # 3. Output message # ----------", "# x, y, z, and q values x = int.from_bytes(value[0:4], byteorder='little') y =", "message. Flushing input buffer.\") print(value) ser.reset_input_buffer() return EXIT_FAILURE # ---------Now, I read until", "you have not already done so import time import sys import argparse defaultPortName", "for use with DWM 1001 module through UART TLV interface # This script", "global ser print(\"dwmPosGet started.\") myPort = myParser() # Establish serial port connection try:", "stopLoop is False: getLocations() def sendTLV(request): global ser txBuffer = request try: ser.reset_input_buffer()", "= ser.read(lengthTLV) # Read the value [error code]. return [typeTLV, lengthTLV, valueTLV] def", "confuse this script. 
ser.write(txBuffer) except: print(f\"Error during transmission of request {txBuffer.hex()}\") stopLoop =", "value[offset:offset+2].hex() # UWB address d = int.from_bytes(value[offset+2:offset+6], byteorder = 'little') # distance dq", "q] = distances[i] print(\"{:<5} {:<15} {:<5} {:<15} {:<15} {:<15} {:<5}\".format(addr, d, dq, x,", "set position. Follow with position as 13 bytes DWM_POS_GET = bytes.fromhex(\"02 00\") #", "parsing as I go # --First response is confirmation / error code #", "so import time import sys import argparse defaultPortName = '/dev/ttyACM0' # On linux,", "Commands [Type, length] DWM_POS_SET = bytes.fromhex(\"01 0d\") # Used to set position. Follow", "import argparse defaultPortName = '/dev/ttyACM0' # On linux, you should use /dev/ttyACM0 #", "Flushing input buffer.\") print(value) ser.reset_input_buffer() return EXIT_FAILURE # ---------Now, I read until I", "have not already done so import time import sys import argparse defaultPortName =", "Ranging anchor distances TLV_TYPE_RNG_AN_POS_DIST = bytes.fromhex(\"49\") # Response: Ranging anchor distances and positions", "int.from_bytes(value[offset+12:offset+13], byteorder = 'little') distances.append([addr, d, dq]) return [num_distances, distances] if typeTLV ==", "# 1. Ask Decawave for Position and distances temp = sendTLV(DWM_LOC_GET) if temp", "# distance dq = int.from_bytes(value[offset+6:offset+7], byteorder = 'little') # distance quality [x,y,z,q] =", "'little') # distance quality [x,y,z,q] = parsePOSvalue(value[offset+7:offset+20]) distances.append([addr, d, dq, x, y, z,", "[num_distances, distances] = parseTLV(typeTLV, length, value) for i in range(num_distances): [addr, d, dq,", "DWM_POS_GET = bytes.fromhex(\"02 00\") # Used to ask for position. DWM_LOC_GET = bytes.fromhex(\"0c", "go # --First response is confirmation / error code # --Second response is", "the port to use with a command line argument. 
# --port=[name or number]", "If it receives TLV_TYPE_DUMMY, it keeps listening for next message global ser #", "length, value) [typeTLV, length, value] = receiveTLV() # Expect Distances if length <", "baudrate=115200, timeout=None) print(ser) print(\"Connection established.\") except: print(\"Error in trying to connect to serial", "return EXIT_SUCCESS def receiveTLV(): # Listen for TLV response from Decawave DWM1001 module", "argparse.ArgumentParser(description = 'get position info') # Script descript. parser.add_argument( '--port', default=defaultPortName, help='specify the", "13-byte position code and returns the # x, y, z, and q values", "position updates to be written to anchor nodes # Currently limited to Python", "# The following lines allow this script to run as a program if", "q)) if typeTLV == TLV_TYPE_RNG_AN_DIST: print(\"{:=<5} {:=<15} {:=<5}\".format('addr','d','dq')) [num_distances, distances] = parseTLV(typeTLV, length,", "for i in range(num_distances): offset = i*13+1 addr = value[offset:offset+2].hex() # UWB address", "int.from_bytes(value[8:12], byteorder='little') q = int.from_bytes(value[12:13], byteorder='little') return [x, y, z, q] def parseTLV(typeTLV,", "call as specified in the # DWM1001 Firmware API Guide 5.3.10. # It", "UART TLV interface # This script calls the dwm_loc_get API call as specified", "TLV_TYPE_RNG_AN_POS_DIST = bytes.fromhex(\"49\") # Response: Ranging anchor distances and positions if typeTLV ==", "parses the information received to send over # the ROS network. 
# In", "'q')) [num_distances, distances] = parseTLV(typeTLV, length, value) for i in range(num_distances): [addr, d,", "coordinates x,y,z with q TLV_TYPE_RNG_AN_DIST = bytes.fromhex(\"48\") # Response: Ranging anchor distances TLV_TYPE_RNG_AN_POS_DIST", "or similar def myParser(): # This function handles command lets the user specify", "bytes.fromhex(\"48\") # Response: Ranging anchor distances TLV_TYPE_RNG_AN_POS_DIST = bytes.fromhex(\"49\") # Response: Ranging anchor", "= bytes.fromhex(\"41\") # Response position coordinates x,y,z with q # TLV_TYPE_RNG_AN_DIST = bytes.fromhex(\"48\")", "{:=<15} {:=<15} {:=<15} {:=<5}\".format('addr', 'd', 'dq', 'x', 'y', 'z', 'q')) [num_distances, distances] =", "'x', 'y', 'z', 'q')) [num_distances, distances] = parseTLV(typeTLV, length, value) for i in", "use /dev/ttyACM0 # ON Windows, the port name may be 'COM9' or similar", "x,y,z with q TLV_TYPE_RNG_AN_DIST = bytes.fromhex(\"48\") # Response: Ranging anchor distances TLV_TYPE_RNG_AN_POS_DIST =", "Loop plan: # 1. Ask Decawave for position # 2. Receive response, parsing", "Response: Ranging anchor distances and positions def main(): global ser print(\"dwmPosGet started.\") myPort", "length, value): # TLV_TYPE_DUMMY = bytes.fromhex(\"00\") # Reserved for SPI dummy byte #", "print(\"No position received. Flushing buffer.\") ser.reset_input_buffer() return EXIT_FAILURE else: printTLV(typeTLV, length, value) [typeTLV,", "Length, Value] # If it receives TLV_TYPE_DUMMY, it keeps listening for next message", "1. Ask Decawave for position # 2. 
Receive response, parsing as I go", "'little') dq = int.from_bytes(value[offset+12:offset+13], byteorder = 'little') distances.append([addr, d, dq]) return [num_distances, distances]", "through UART TLV interface # This script calls the dwm_loc_get API call as", "updates to be written to anchor nodes # Currently limited to Python 3.6+.", "to the serial port EXIT_SUCCESS = 0 EXIT_FAILURE = 1 # API Error", "# On linux, you should use /dev/ttyACM0 # ON Windows, the port name", "# ---------- while stopLoop is False: getLocations() def sendTLV(request): global ser txBuffer =", "# This helper function takes a 13-byte position code and returns the #", "Position and distances temp = sendTLV(DWM_LOC_GET) if temp == EXIT_FAILURE: return EXIT_FAILURE #", "get dummy bytes before real response. [typeTLV, length, value]= receiveTLV() if value !=", "receiveTLV() # Expect Distances if length < 13: print(\"No distances received\") else: printTLV(typeTLV,", "Request for position + distances to anchors/tags # Response codes TLV_TYPE_DUMMY = bytes.fromhex(\"00\")", "{:_<15} {:_<5}\".format('x','y','z','q')) [x,y,z,q] = parseTLV(typeTLV, length, value) print(\"{:<15} {:<15} {:<15} {:<5}\".format(x,y,z,q)) if typeTLV", "port (See myParser() function) import serial # use \"pip install pyserial\" if you", "with q TLV_TYPE_RNG_AN_DIST = bytes.fromhex(\"48\") # Response: Ranging anchor distances TLV_TYPE_RNG_AN_POS_DIST = bytes.fromhex(\"49\")", "= TLV_TYPE_DUMMY while (typeTLV == TLV_TYPE_DUMMY): typeTLV = ser.read(1) # Read the \"type\"", "EXIT_FAILURE # ---------Now, I read until I get the position [typeTLV, length, value]", "!= ERR_CODE_OK: print(\"Received an error message. 
Flushing input buffer.\") print(value) ser.reset_input_buffer() return EXIT_FAILURE", "parameter # 4: busy # 5: operation not permitted # API Commands [Type,", "= ser.read(1) # Read the \"length\" byte of the response lengthTLV = int.from_bytes(lengthTLV,", "TLV_TYPE_RNG_AN_DIST = bytes.fromhex(\"48\") # Response: Ranging anchor distances # TLV_TYPE_RNG_AN_POS_DIST = bytes.fromhex(\"49\") #", "(num_distances): offset = i*13+1 addr = value[offset:offset+8].hex() # Note: Address size is 8", "for SPI dummy byte TLV_TYPE_POS_XYZ = bytes.fromhex(\"41\") # Response position coordinates x,y,z with", "response is confirmation / error code # --Second response is position # 2.5", "def parseTLV(typeTLV, length, value): # TLV_TYPE_DUMMY = bytes.fromhex(\"00\") # Reserved for SPI dummy", "positions if typeTLV == TLV_TYPE_POS_XYZ: [x, y, z, q] = parsePOSvalue(value) return [x,", "length] DWM_POS_SET = bytes.fromhex(\"01 0d\") # Used to set position. Follow with position", "error code # --Second response is position # 2.5 Error handling # 3.", "int.from_bytes(lengthTLV, byteorder='little') valueTLV = ser.read(lengthTLV) # Read the value [error code]. return [typeTLV,", "EXIT_FAILURE = 1 # API Error codes ERR_CODE_OK = bytes.fromhex(\"00\") # 1: unknown", "1. Ask Decawave for Position and distances temp = sendTLV(DWM_LOC_GET) if temp ==", "(typeTLV == TLV_TYPE_DUMMY): typeTLV = ser.read(1) # Read the \"type\" byte of the", "line arguments # to specify the name of the port (See myParser() function)", "Receive response. May get dummy bytes before real response. [typeTLV, length, value]= receiveTLV()", "0d\") # Used to set position. 
Follow with position as 13 bytes DWM_POS_GET", "d = int.from_bytes(value[offset+8:offset+12], byteorder = 'little') dq = int.from_bytes(value[offset+12:offset+13], byteorder = 'little') distances.append([addr,", "if typeTLV == TLV_TYPE_POS_XYZ: print( \"{:_<15} {:_<15} {:_<15} {:_<5}\".format('x','y','z','q')) [x,y,z,q] = parseTLV(typeTLV, length,", "i in range(num_distances): offset = i*13+1 addr = value[offset:offset+2].hex() # UWB address d", "{:=<15} {:=<5} {:=<15} {:=<15} {:=<15} {:=<5}\".format('addr', 'd', 'dq', 'x', 'y', 'z', 'q')) [num_distances,", "arguments # to specify the name of the port (See myParser() function) import", "be expanded to allow # position updates to be written to anchor nodes", "response lengthTLV = int.from_bytes(lengthTLV, byteorder='little') valueTLV = ser.read(lengthTLV) # Read the value [error", "case: print(\"Error: attempted to parse TLV of type not yet supported.\") return EXIT_FAILURE", "d, dq, x, y, z, q)) if typeTLV == TLV_TYPE_RNG_AN_DIST: print(\"{:=<5} {:=<15} {:=<5}\".format('addr','d','dq'))", "as I go # --First response is confirmation / error code # --Second", "typeTLV == TLV_TYPE_POS_XYZ: [x, y, z, q] = parsePOSvalue(value) return [x, y, z,", "TLV frame # 2: internal error # 3: invalid parameter # 4: busy", "ERR_CODE_OK = bytes.fromhex(\"00\") # 1: unknown command or broken TLV frame # 2:", "# Default case: print(\"Error: attempted to parse TLV of type not yet supported.\")", "response, parsing as I go # --First response is confirmation / error code", "= None # This will be the name of the handle to the", "will be expanded to allow # position updates to be written to anchor", "codes ERR_CODE_OK = bytes.fromhex(\"00\") # 1: unknown command or broken TLV frame #", "2.5 Error handling # 3. 
Output message # ---------- while stopLoop is False:", "# TLV_TYPE_POS_XYZ = bytes.fromhex(\"41\") # Response position coordinates x,y,z with q # TLV_TYPE_RNG_AN_DIST", "type not yet supported.\") return EXIT_FAILURE def printTLV(typeTLV, length, value): if typeTLV ==", "dummy bytes before real response. [typeTLV, length, value]= receiveTLV() if value != ERR_CODE_OK:", "stopLoop = False # Loop plan: # 1. Ask Decawave for position #", "established.\") except: print(\"Error in trying to connect to serial port {}\".format(myPort)) stopLoop =", "args.port ser = None # This will be the name of the handle", "# ------------- # 2. Receive response. May get dummy bytes before real response.", "distances = [] for i in range (num_distances): offset = i*13+1 addr =", "distances received\") else: printTLV(typeTLV, length, value) # The following lines allow this script", "3: invalid parameter # 4: busy # 5: operation not permitted # API", "Response position coordinates x,y,z with q TLV_TYPE_RNG_AN_DIST = bytes.fromhex(\"48\") # Response: Ranging anchor", "try: ser = serial.Serial(myPort, baudrate=115200, timeout=None) print(ser) print(\"Connection established.\") except: print(\"Error in trying", "00\") # Request for position + distances to anchors/tags # Response codes TLV_TYPE_DUMMY", "range(num_distances): [addr, d, dq] = distances[i] print(\"{:<5} {:<15} {:<5}\".format(addr, d, dq)) def getLocations():", "should use /dev/ttyACM0 # ON Windows, the port name may be 'COM9' or", "---------- while stopLoop is False: getLocations() def sendTLV(request): global ser txBuffer = request", "anything in the buffer that could confuse this script. 
ser.write(txBuffer) except: print(f\"Error during", "range (num_distances): offset = i*13+1 addr = value[offset:offset+8].hex() # Note: Address size is", "size is 8 bytes here, not 2 bytes d = int.from_bytes(value[offset+8:offset+12], byteorder =", "typeTLV == TLV_TYPE_POS_XYZ: print( \"{:_<15} {:_<15} {:_<15} {:_<5}\".format('x','y','z','q')) [x,y,z,q] = parseTLV(typeTLV, length, value)", "it keeps listening for next message global ser # The handle for the", "code and returns the # x, y, z, and q values x =", "during transmission of request {txBuffer.hex()}\") stopLoop = True return EXIT_FAILURE return EXIT_SUCCESS def", "# --port=[name or number] parser = argparse.ArgumentParser(description = 'get position info') # Script", "serial port connection typeTLV = TLV_TYPE_DUMMY while (typeTLV == TLV_TYPE_DUMMY): typeTLV = ser.read(1)", "# API Commands [Type, length] DWM_POS_SET = bytes.fromhex(\"01 0d\") # Used to set", "default=defaultPortName, help='specify the name of the port to use (default: ' + defaultPortName", "distances = [] for i in range(num_distances): offset = i*13+1 addr = value[offset:offset+2].hex()", "myParser() # Establish serial port connection try: ser = serial.Serial(myPort, baudrate=115200, timeout=None) print(ser)", "y, z, q] def parseTLV(typeTLV, length, value): # TLV_TYPE_DUMMY = bytes.fromhex(\"00\") # Reserved", "receives TLV_TYPE_DUMMY, it keeps listening for next message global ser # The handle", "argparse defaultPortName = '/dev/ttyACM0' # On linux, you should use /dev/ttyACM0 # ON", "for the serial port connection typeTLV = TLV_TYPE_DUMMY while (typeTLV == TLV_TYPE_DUMMY): typeTLV", "position coordinates x,y,z with q TLV_TYPE_RNG_AN_DIST = bytes.fromhex(\"48\") # Response: Ranging anchor distances", "= parsePOSvalue(value[offset+7:offset+20]) distances.append([addr, d, dq, x, y, z, q]) return [num_distances, distances] #", "anchor distances TLV_TYPE_RNG_AN_POS_DIST = bytes.fromhex(\"49\") # Response: Ranging anchor distances and positions 
def", "+ defaultPortName + ' )' ) args = parser.parse_args() print(\"Using port:\", args.port) return", "Script descript. parser.add_argument( '--port', default=defaultPortName, help='specify the name of the port to use", "# Returns a list of [Type, Length, Value] # If it receives TLV_TYPE_DUMMY,", "# 2: internal error # 3: invalid parameter # 4: busy # 5:", "# Read the \"length\" byte of the response lengthTLV = int.from_bytes(lengthTLV, byteorder='little') valueTLV", "TLV interface # This script calls the dwm_loc_get API call as specified in", "print(\"Using port:\", args.port) return args.port ser = None # This will be the", "args.port) return args.port ser = None # This will be the name of", "a 13-byte position code and returns the # x, y, z, and q", "bytes d = int.from_bytes(value[offset+8:offset+12], byteorder = 'little') dq = int.from_bytes(value[offset+12:offset+13], byteorder = 'little')", "a list of [Type, Length, Value] # If it receives TLV_TYPE_DUMMY, it keeps", "offset = i*13+1 addr = value[offset:offset+2].hex() # UWB address d = int.from_bytes(value[offset+2:offset+6], byteorder", "i in range(num_distances): [addr, d, dq, x, y, z, q] = distances[i] print(\"{:<5}", "1001 module through UART TLV interface # This script calls the dwm_loc_get API", "frame # 2: internal error # 3: invalid parameter # 4: busy #", "Windows, the port name may be 'COM9' or similar def myParser(): # This", "return [x, y, z, q] def parseTLV(typeTLV, length, value): # TLV_TYPE_DUMMY = bytes.fromhex(\"00\")", "here, not 2 bytes d = int.from_bytes(value[offset+8:offset+12], byteorder = 'little') dq = int.from_bytes(value[offset+12:offset+13],", "Response codes TLV_TYPE_DUMMY = bytes.fromhex(\"00\") # Reserved for SPI dummy byte TLV_TYPE_POS_XYZ =", "Decawave for Position and distances temp = sendTLV(DWM_LOC_GET) if temp == EXIT_FAILURE: return", "typeTLV == TLV_TYPE_RNG_AN_DIST: print(\"{:=<5} {:=<15} {:=<5}\".format('addr','d','dq')) [num_distances, distances] = parseTLV(typeTLV, 
length, value) for", "Read the \"type\" byte of the response lengthTLV = ser.read(1) # Read the", "internal error # 3: invalid parameter # 4: busy # 5: operation not", "serial.Serial(myPort, baudrate=115200, timeout=None) print(ser) print(\"Connection established.\") except: print(\"Error in trying to connect to", "y, z, and q values x = int.from_bytes(value[0:4], byteorder='little') y = int.from_bytes(value[4:8], byteorder='little')", "Reserved for SPI dummy byte # TLV_TYPE_POS_XYZ = bytes.fromhex(\"41\") # Response position coordinates", "Get rid of anything in the buffer that could confuse this script. ser.write(txBuffer)", "dq, x, y, z, q] = distances[i] print(\"{:<5} {:<15} {:<5} {:<15} {:<15} {:<15}", "[x, y, z, q] if typeTLV == TLV_TYPE_RNG_AN_DIST: # This code may be", "# This function handles command lets the user specify the # name of", "Position if length < 13: print(\"No position received. Flushing buffer.\") ser.reset_input_buffer() return EXIT_FAILURE", "as specified in the # DWM1001 Firmware API Guide 5.3.10. # It parses", "None # This will be the name of the handle to the serial", "this script. ser.write(txBuffer) except: print(f\"Error during transmission of request {txBuffer.hex()}\") stopLoop = True", "TLV_TYPE_DUMMY, it keeps listening for next message global ser # The handle for", "{:<15} {:<15} {:<5}\".format(x,y,z,q)) if typeTLV == TLV_TYPE_RNG_AN_POS_DIST: print(\"{:=<5} {:=<15} {:=<5} {:=<15} {:=<15} {:=<15}", "or broken TLV frame # 2: internal error # 3: invalid parameter #", "= ser.read(1) # Read the \"type\" byte of the response lengthTLV = ser.read(1)", "y = int.from_bytes(value[4:8], byteorder='little') z = int.from_bytes(value[8:12], byteorder='little') q = int.from_bytes(value[12:13], byteorder='little') return", "to Python 3.6+. Use command line arguments # to specify the name of", "Currently limited to Python 3.6+. 
Use command line arguments # to specify the", "y, z, q] if typeTLV == TLV_TYPE_RNG_AN_DIST: # This code may be received", "be 'COM9' or similar def myParser(): # This function handles command lets the", "= value[offset:offset+8].hex() # Note: Address size is 8 bytes here, not 2 bytes", "# Currently limited to Python 3.6+. Use command line arguments # to specify", "typeTLV = TLV_TYPE_DUMMY while (typeTLV == TLV_TYPE_DUMMY): typeTLV = ser.read(1) # Read the", "script will be expanded to allow # position updates to be written to", "+ ' )' ) args = parser.parse_args() print(\"Using port:\", args.port) return args.port ser", "bytes.fromhex(\"49\") # Response: Ranging anchor distances and positions def main(): global ser print(\"dwmPosGet", "Read the value [error code]. return [typeTLV, lengthTLV, valueTLV] def parsePOSvalue(value): # This", "i in range (num_distances): offset = i*13+1 addr = value[offset:offset+8].hex() # Note: Address", "dq]) return [num_distances, distances] if typeTLV == TLV_TYPE_RNG_AN_POS_DIST: num_distances = int.from_bytes(value[0:1], byteorder =", "TLV_TYPE_RNG_AN_POS_DIST: num_distances = int.from_bytes(value[0:1], byteorder = 'little') distances = [] for i in", "script to run as a program if called directly. if __name__ == \"__main__\":", "byteorder = 'little') # distance quality [x,y,z,q] = parsePOSvalue(value[offset+7:offset+20]) distances.append([addr, d, dq, x," ]
[ "local_controller = self.controller cavern = local_controller.caverns if local_controller.hives and not cavern: return False", "not local_controller.hives and not await BuildHive.morphing_lairs(self): return False if cavern.ready: return len(local_controller.ultralisks) *", "from sc2.constants import HYDRALISK from actions.build.hive import BuildHive class TrainHydralisk(BuildHive): \"\"\"Ok for now\"\"\"", "now\"\"\" def __init__(self, main): self.controller = main BuildHive.__init__(self, self.controller) async def should_handle(self): \"\"\"Requirements", "is smoother\"\"\" local_controller = self.controller cavern = local_controller.caverns if local_controller.hives and not cavern:", "not local_controller.floating_buildings_bm async def handle(self): \"\"\"Execute the action of training hydras\"\"\" local_controller =", "goes here\"\"\" from sc2.constants import HYDRALISK from actions.build.hive import BuildHive class TrainHydralisk(BuildHive): \"\"\"Ok", "return len(local_controller.ultralisks) * 2.75 > len(local_controller.hydras) return not local_controller.floating_buildings_bm async def handle(self): \"\"\"Execute", "= local_controller.caverns if local_controller.hives and not cavern: return False if not local_controller.can_train(HYDRALISK, local_controller.hydradens.ready):", "* 2.75 > len(local_controller.hydras) return not local_controller.floating_buildings_bm async def handle(self): \"\"\"Execute the action", "import HYDRALISK from actions.build.hive import BuildHive class TrainHydralisk(BuildHive): \"\"\"Ok for now\"\"\" def __init__(self,", "and not await BuildHive.morphing_lairs(self): return False if cavern.ready: return len(local_controller.ultralisks) * 2.75 >", "not cavern: return False if not local_controller.can_train(HYDRALISK, local_controller.hydradens.ready): return False if local_controller.pits.ready and", "training hydralisks goes here\"\"\" from sc2.constants import HYDRALISK from actions.build.hive import BuildHive class", 
"transition to hive is smoother\"\"\" local_controller = self.controller cavern = local_controller.caverns if local_controller.hives", "run handle, it limits the training a little so it keeps building ultralisks,", "and not cavern: return False if not local_controller.can_train(HYDRALISK, local_controller.hydradens.ready): return False if local_controller.pits.ready", "handle, it limits the training a little so it keeps building ultralisks, needs", "so it keeps building ultralisks, needs more limitations so the transition to hive", "for now\"\"\" def __init__(self, main): self.controller = main BuildHive.__init__(self, self.controller) async def should_handle(self):", "a little so it keeps building ultralisks, needs more limitations so the transition", "self.controller) async def should_handle(self): \"\"\"Requirements to run handle, it limits the training a", "smoother\"\"\" local_controller = self.controller cavern = local_controller.caverns if local_controller.hives and not cavern: return", "local_controller.hives and not await BuildHive.morphing_lairs(self): return False if cavern.ready: return len(local_controller.ultralisks) * 2.75", "main BuildHive.__init__(self, self.controller) async def should_handle(self): \"\"\"Requirements to run handle, it limits the", "import BuildHive class TrainHydralisk(BuildHive): \"\"\"Ok for now\"\"\" def __init__(self, main): self.controller = main", "BuildHive class TrainHydralisk(BuildHive): \"\"\"Ok for now\"\"\" def __init__(self, main): self.controller = main BuildHive.__init__(self,", "False if not local_controller.can_train(HYDRALISK, local_controller.hydradens.ready): return False if local_controller.pits.ready and not local_controller.hives and", "\"\"\"Everything related to training hydralisks goes here\"\"\" from sc2.constants import HYDRALISK from actions.build.hive", "cavern: return False if not local_controller.can_train(HYDRALISK, local_controller.hydradens.ready): return False if local_controller.pits.ready and 
not", "training a little so it keeps building ultralisks, needs more limitations so the", "return False if cavern.ready: return len(local_controller.ultralisks) * 2.75 > len(local_controller.hydras) return not local_controller.floating_buildings_bm", "ultralisks, needs more limitations so the transition to hive is smoother\"\"\" local_controller =", "sc2.constants import HYDRALISK from actions.build.hive import BuildHive class TrainHydralisk(BuildHive): \"\"\"Ok for now\"\"\" def", "len(local_controller.hydras) return not local_controller.floating_buildings_bm async def handle(self): \"\"\"Execute the action of training hydras\"\"\"", "local_controller.floating_buildings_bm async def handle(self): \"\"\"Execute the action of training hydras\"\"\" local_controller = self.controller", "if local_controller.pits.ready and not local_controller.hives and not await BuildHive.morphing_lairs(self): return False if cavern.ready:", "from actions.build.hive import BuildHive class TrainHydralisk(BuildHive): \"\"\"Ok for now\"\"\" def __init__(self, main): self.controller", "the training a little so it keeps building ultralisks, needs more limitations so", "self.controller = main BuildHive.__init__(self, self.controller) async def should_handle(self): \"\"\"Requirements to run handle, it", "len(local_controller.ultralisks) * 2.75 > len(local_controller.hydras) return not local_controller.floating_buildings_bm async def handle(self): \"\"\"Execute the", "keeps building ultralisks, needs more limitations so the transition to hive is smoother\"\"\"", "def __init__(self, main): self.controller = main BuildHive.__init__(self, self.controller) async def should_handle(self): \"\"\"Requirements to", "return not local_controller.floating_buildings_bm async def handle(self): \"\"\"Execute the action of training hydras\"\"\" local_controller", "def should_handle(self): \"\"\"Requirements to run handle, it limits the training a little so", "if cavern.ready: return 
len(local_controller.ultralisks) * 2.75 > len(local_controller.hydras) return not local_controller.floating_buildings_bm async def", "local_controller.hydradens.ready): return False if local_controller.pits.ready and not local_controller.hives and not await BuildHive.morphing_lairs(self): return", "should_handle(self): \"\"\"Requirements to run handle, it limits the training a little so it", "local_controller.caverns if local_controller.hives and not cavern: return False if not local_controller.can_train(HYDRALISK, local_controller.hydradens.ready): return", "limits the training a little so it keeps building ultralisks, needs more limitations", "self.controller cavern = local_controller.caverns if local_controller.hives and not cavern: return False if not", "__init__(self, main): self.controller = main BuildHive.__init__(self, self.controller) async def should_handle(self): \"\"\"Requirements to run", "if local_controller.hives and not cavern: return False if not local_controller.can_train(HYDRALISK, local_controller.hydradens.ready): return False", "the transition to hive is smoother\"\"\" local_controller = self.controller cavern = local_controller.caverns if", "\"\"\"Requirements to run handle, it limits the training a little so it keeps", "and not local_controller.hives and not await BuildHive.morphing_lairs(self): return False if cavern.ready: return len(local_controller.ultralisks)", "here\"\"\" from sc2.constants import HYDRALISK from actions.build.hive import BuildHive class TrainHydralisk(BuildHive): \"\"\"Ok for", "if not local_controller.can_train(HYDRALISK, local_controller.hydradens.ready): return False if local_controller.pits.ready and not local_controller.hives and not", "async def should_handle(self): \"\"\"Requirements to run handle, it limits the training a little", "not local_controller.can_train(HYDRALISK, local_controller.hydradens.ready): return False if local_controller.pits.ready and not local_controller.hives and not await", "class 
TrainHydralisk(BuildHive): \"\"\"Ok for now\"\"\" def __init__(self, main): self.controller = main BuildHive.__init__(self, self.controller)", "\"\"\"Ok for now\"\"\" def __init__(self, main): self.controller = main BuildHive.__init__(self, self.controller) async def", "it keeps building ultralisks, needs more limitations so the transition to hive is", "hive is smoother\"\"\" local_controller = self.controller cavern = local_controller.caverns if local_controller.hives and not", "False if local_controller.pits.ready and not local_controller.hives and not await BuildHive.morphing_lairs(self): return False if", "cavern.ready: return len(local_controller.ultralisks) * 2.75 > len(local_controller.hydras) return not local_controller.floating_buildings_bm async def handle(self):", "False if cavern.ready: return len(local_controller.ultralisks) * 2.75 > len(local_controller.hydras) return not local_controller.floating_buildings_bm async", "to run handle, it limits the training a little so it keeps building", "return False if local_controller.pits.ready and not local_controller.hives and not await BuildHive.morphing_lairs(self): return False", "= main BuildHive.__init__(self, self.controller) async def should_handle(self): \"\"\"Requirements to run handle, it limits", "2.75 > len(local_controller.hydras) return not local_controller.floating_buildings_bm async def handle(self): \"\"\"Execute the action of", "limitations so the transition to hive is smoother\"\"\" local_controller = self.controller cavern =", "local_controller.can_train(HYDRALISK, local_controller.hydradens.ready): return False if local_controller.pits.ready and not local_controller.hives and not await BuildHive.morphing_lairs(self):", "related to training hydralisks goes here\"\"\" from sc2.constants import HYDRALISK from actions.build.hive import", "it limits the training a little so it keeps building ultralisks, needs more", "def handle(self): \"\"\"Execute the action of training hydras\"\"\" 
local_controller = self.controller local_controller.add_action(local_controller.larvae.random.train(HYDRALISK)) return", "cavern = local_controller.caverns if local_controller.hives and not cavern: return False if not local_controller.can_train(HYDRALISK,", "= self.controller cavern = local_controller.caverns if local_controller.hives and not cavern: return False if", "needs more limitations so the transition to hive is smoother\"\"\" local_controller = self.controller", "local_controller.pits.ready and not local_controller.hives and not await BuildHive.morphing_lairs(self): return False if cavern.ready: return", "handle(self): \"\"\"Execute the action of training hydras\"\"\" local_controller = self.controller local_controller.add_action(local_controller.larvae.random.train(HYDRALISK)) return True", "more limitations so the transition to hive is smoother\"\"\" local_controller = self.controller cavern", "> len(local_controller.hydras) return not local_controller.floating_buildings_bm async def handle(self): \"\"\"Execute the action of training", "to training hydralisks goes here\"\"\" from sc2.constants import HYDRALISK from actions.build.hive import BuildHive", "<gh_stars>0 \"\"\"Everything related to training hydralisks goes here\"\"\" from sc2.constants import HYDRALISK from", "await BuildHive.morphing_lairs(self): return False if cavern.ready: return len(local_controller.ultralisks) * 2.75 > len(local_controller.hydras) return", "async def handle(self): \"\"\"Execute the action of training hydras\"\"\" local_controller = self.controller local_controller.add_action(local_controller.larvae.random.train(HYDRALISK))", "local_controller.hives and not cavern: return False if not local_controller.can_train(HYDRALISK, local_controller.hydradens.ready): return False if", "so the transition to hive is smoother\"\"\" local_controller = self.controller cavern = local_controller.caverns", "to hive is smoother\"\"\" local_controller = self.controller cavern = 
local_controller.caverns if local_controller.hives and", "BuildHive.__init__(self, self.controller) async def should_handle(self): \"\"\"Requirements to run handle, it limits the training", "main): self.controller = main BuildHive.__init__(self, self.controller) async def should_handle(self): \"\"\"Requirements to run handle,", "hydralisks goes here\"\"\" from sc2.constants import HYDRALISK from actions.build.hive import BuildHive class TrainHydralisk(BuildHive):", "actions.build.hive import BuildHive class TrainHydralisk(BuildHive): \"\"\"Ok for now\"\"\" def __init__(self, main): self.controller =", "TrainHydralisk(BuildHive): \"\"\"Ok for now\"\"\" def __init__(self, main): self.controller = main BuildHive.__init__(self, self.controller) async", "little so it keeps building ultralisks, needs more limitations so the transition to", "BuildHive.morphing_lairs(self): return False if cavern.ready: return len(local_controller.ultralisks) * 2.75 > len(local_controller.hydras) return not", "return False if not local_controller.can_train(HYDRALISK, local_controller.hydradens.ready): return False if local_controller.pits.ready and not local_controller.hives", "HYDRALISK from actions.build.hive import BuildHive class TrainHydralisk(BuildHive): \"\"\"Ok for now\"\"\" def __init__(self, main):", "not await BuildHive.morphing_lairs(self): return False if cavern.ready: return len(local_controller.ultralisks) * 2.75 > len(local_controller.hydras)", "building ultralisks, needs more limitations so the transition to hive is smoother\"\"\" local_controller" ]
[ "Initialize UI settings (for prompts) based on terminal size (width) UI_input_fill_width = int(int(os.get_terminal_size()[0])", "{user.get_username()} ] \".center(UI_fill_width, UI_user_menu_fill_char)) def init_user_program(username: str, password: str) -> None: __logged_user =", "get_username(self): return self._username def print_user_menu(user: User) -> None: print() print(f\" [ USER: {user.get_username()}", "on terminal size (width) UI_input_fill_width = int(int(os.get_terminal_size()[0]) / 2) + 5 # UI_input_fill_width", "init_user_program(username: str, password: str) -> None: __logged_user = User(username=username, password=password) os.system(\"clear\") print_user_menu(user=__logged_user) input(\"System", "os # Initialize UI settings based on terminal size (width) UI_fill_width = os.get_terminal_size()[0]", "prompts) based on terminal size (width) UI_input_fill_width = int(int(os.get_terminal_size()[0]) / 2) + 5", "settings based on terminal size (width) UI_fill_width = os.get_terminal_size()[0] # UI_fill_width = 74", "based on terminal size (width) UI_fill_width = os.get_terminal_size()[0] # UI_fill_width = 74 UI_user_menu_fill_char", "[ USER: {user.get_username()} ] \".center(UI_fill_width, UI_user_menu_fill_char)) def init_user_program(username: str, password: str) -> None:", "import os # Initialize UI settings based on terminal size (width) UI_fill_width =", "UI_fill_width = 74 UI_user_menu_fill_char = '#' # Initialize UI settings (for prompts) based", "\".center(UI_fill_width, UI_user_menu_fill_char)) def init_user_program(username: str, password: str) -> None: __logged_user = User(username=username, password=password)", "password: str) -> None: __logged_user = User(username=username, password=password) os.system(\"clear\") print_user_menu(user=__logged_user) input(\"System ready ...\".center(UI_fill_width,", "= username self._password = password pass def get_username(self): return self._username def print_user_menu(user: User)", "# 
UI_input_fill_width = 54 UI_input_fill_char = ' ' class User: def __init__(self, username:", "self._username = username self._password = password pass def get_username(self): return self._username def print_user_menu(user:", "__init__(self, username: str, password: str): self._username = username self._password = password pass def", "print_user_menu(user: User) -> None: print() print(f\" [ USER: {user.get_username()} ] \".center(UI_fill_width, UI_user_menu_fill_char)) def", "Initialize UI settings based on terminal size (width) UI_fill_width = os.get_terminal_size()[0] # UI_fill_width", "2) + 5 # UI_input_fill_width = 54 UI_input_fill_char = ' ' class User:", "str, password: str): self._username = username self._password = password pass def get_username(self): return", "username self._password = password pass def get_username(self): return self._username def print_user_menu(user: User) ->", "= 74 UI_user_menu_fill_char = '#' # Initialize UI settings (for prompts) based on", "int(int(os.get_terminal_size()[0]) / 2) + 5 # UI_input_fill_width = 54 UI_input_fill_char = ' '", "UI_input_fill_width = int(int(os.get_terminal_size()[0]) / 2) + 5 # UI_input_fill_width = 54 UI_input_fill_char =", "def __init__(self, username: str, password: str): self._username = username self._password = password pass", "self._username def print_user_menu(user: User) -> None: print() print(f\" [ USER: {user.get_username()} ] \".center(UI_fill_width,", "UI settings (for prompts) based on terminal size (width) UI_input_fill_width = int(int(os.get_terminal_size()[0]) /", "# Initialize UI settings (for prompts) based on terminal size (width) UI_input_fill_width =", "str) -> None: __logged_user = User(username=username, password=password) os.system(\"clear\") print_user_menu(user=__logged_user) input(\"System ready ...\".center(UI_fill_width, \"", "os.get_terminal_size()[0] # UI_fill_width = 74 UI_user_menu_fill_char = '#' # Initialize UI settings (for", "UI_user_menu_fill_char)) def 
init_user_program(username: str, password: str) -> None: __logged_user = User(username=username, password=password) os.system(\"clear\")", "UI_fill_width = os.get_terminal_size()[0] # UI_fill_width = 74 UI_user_menu_fill_char = '#' # Initialize UI", "terminal size (width) UI_fill_width = os.get_terminal_size()[0] # UI_fill_width = 74 UI_user_menu_fill_char = '#'", "/ 2) + 5 # UI_input_fill_width = 54 UI_input_fill_char = ' ' class", "= password pass def get_username(self): return self._username def print_user_menu(user: User) -> None: print()", "54 UI_input_fill_char = ' ' class User: def __init__(self, username: str, password: str):", "= int(int(os.get_terminal_size()[0]) / 2) + 5 # UI_input_fill_width = 54 UI_input_fill_char = '", "def init_user_program(username: str, password: str) -> None: __logged_user = User(username=username, password=password) os.system(\"clear\") print_user_menu(user=__logged_user)", "# UI_fill_width = 74 UI_user_menu_fill_char = '#' # Initialize UI settings (for prompts)", "str, password: str) -> None: __logged_user = User(username=username, password=password) os.system(\"clear\") print_user_menu(user=__logged_user) input(\"System ready", "] \".center(UI_fill_width, UI_user_menu_fill_char)) def init_user_program(username: str, password: str) -> None: __logged_user = User(username=username,", "def get_username(self): return self._username def print_user_menu(user: User) -> None: print() print(f\" [ USER:", "password: str): self._username = username self._password = password pass def get_username(self): return self._username", "return self._username def print_user_menu(user: User) -> None: print() print(f\" [ USER: {user.get_username()} ]", "User) -> None: print() print(f\" [ USER: {user.get_username()} ] \".center(UI_fill_width, UI_user_menu_fill_char)) def init_user_program(username:", "<gh_stars>1-10 import os # Initialize UI settings based on terminal size (width) UI_fill_width", "size (width) UI_fill_width = os.get_terminal_size()[0] # 
UI_fill_width = 74 UI_user_menu_fill_char = '#' #", "UI_user_menu_fill_char = '#' # Initialize UI settings (for prompts) based on terminal size", "+ 5 # UI_input_fill_width = 54 UI_input_fill_char = ' ' class User: def", "print(f\" [ USER: {user.get_username()} ] \".center(UI_fill_width, UI_user_menu_fill_char)) def init_user_program(username: str, password: str) ->", "print() print(f\" [ USER: {user.get_username()} ] \".center(UI_fill_width, UI_user_menu_fill_char)) def init_user_program(username: str, password: str)", "username: str, password: str): self._username = username self._password = password pass def get_username(self):", "self._password = password pass def get_username(self): return self._username def print_user_menu(user: User) -> None:", "= 54 UI_input_fill_char = ' ' class User: def __init__(self, username: str, password:", "-> None: print() print(f\" [ USER: {user.get_username()} ] \".center(UI_fill_width, UI_user_menu_fill_char)) def init_user_program(username: str,", "size (width) UI_input_fill_width = int(int(os.get_terminal_size()[0]) / 2) + 5 # UI_input_fill_width = 54", "(for prompts) based on terminal size (width) UI_input_fill_width = int(int(os.get_terminal_size()[0]) / 2) +", "(width) UI_fill_width = os.get_terminal_size()[0] # UI_fill_width = 74 UI_user_menu_fill_char = '#' # Initialize", "None: print() print(f\" [ USER: {user.get_username()} ] \".center(UI_fill_width, UI_user_menu_fill_char)) def init_user_program(username: str, password:", "# Initialize UI settings based on terminal size (width) UI_fill_width = os.get_terminal_size()[0] #", "= '#' # Initialize UI settings (for prompts) based on terminal size (width)", "' class User: def __init__(self, username: str, password: str): self._username = username self._password", "def print_user_menu(user: User) -> None: print() print(f\" [ USER: {user.get_username()} ] \".center(UI_fill_width, UI_user_menu_fill_char))", "terminal size (width) UI_input_fill_width = 
int(int(os.get_terminal_size()[0]) / 2) + 5 # UI_input_fill_width =", "password pass def get_username(self): return self._username def print_user_menu(user: User) -> None: print() print(f\"", "-> None: __logged_user = User(username=username, password=password) os.system(\"clear\") print_user_menu(user=__logged_user) input(\"System ready ...\".center(UI_fill_width, \" \"))", "UI settings based on terminal size (width) UI_fill_width = os.get_terminal_size()[0] # UI_fill_width =", "UI_input_fill_width = 54 UI_input_fill_char = ' ' class User: def __init__(self, username: str,", "(width) UI_input_fill_width = int(int(os.get_terminal_size()[0]) / 2) + 5 # UI_input_fill_width = 54 UI_input_fill_char", "UI_input_fill_char = ' ' class User: def __init__(self, username: str, password: str): self._username", "' ' class User: def __init__(self, username: str, password: str): self._username = username", "str): self._username = username self._password = password pass def get_username(self): return self._username def", "= os.get_terminal_size()[0] # UI_fill_width = 74 UI_user_menu_fill_char = '#' # Initialize UI settings", "5 # UI_input_fill_width = 54 UI_input_fill_char = ' ' class User: def __init__(self,", "User: def __init__(self, username: str, password: str): self._username = username self._password = password", "based on terminal size (width) UI_input_fill_width = int(int(os.get_terminal_size()[0]) / 2) + 5 #", "settings (for prompts) based on terminal size (width) UI_input_fill_width = int(int(os.get_terminal_size()[0]) / 2)", "= ' ' class User: def __init__(self, username: str, password: str): self._username =", "on terminal size (width) UI_fill_width = os.get_terminal_size()[0] # UI_fill_width = 74 UI_user_menu_fill_char =", "74 UI_user_menu_fill_char = '#' # Initialize UI settings (for prompts) based on terminal", "class User: def __init__(self, username: str, password: str): self._username = username self._password =", "'#' # Initialize UI settings (for prompts) 
based on terminal size (width) UI_input_fill_width", "pass def get_username(self): return self._username def print_user_menu(user: User) -> None: print() print(f\" [", "USER: {user.get_username()} ] \".center(UI_fill_width, UI_user_menu_fill_char)) def init_user_program(username: str, password: str) -> None: __logged_user" ]
[]
[]
[ "assets, web from modu.persist import sql from modu.editable import define from modu.editable import", "frm filemgr_path = req.get_path('assets/fckeditor/editor/filemanager/browser/default/browser.html') assets.activate_jquery(req) suffix = tags.input(type=\"button\", value=\"Select...\", id='%s-select-button' % self.name, onclick=\"getFile('%s')\"", "@type req: L{modu.web.app.Request} \"\"\" result, filename = self.handle_upload(req, self.get_selected_root(req)) file_url = self.selected_root['url_callback'](req, filename)", "items = [] directory_path = os.path.join(self.get_selected_root(req), folder_path) for item in os.listdir(directory_path): if(item.startswith('.')): continue", "% (result, file_url, filename) ])] def prepare_browser(self, req): \"\"\" Provides support for the", "\"\"\" if(self.content): return self.content return super(FCKEditorResource, self).get_content(req) def get_template(self, req): \"\"\" @see: L{modu.web.resource.ITemplate.get_template()}", "(fck_var, fck_element_name), \"%s.Config['CustomConfigurationsPath'] = \\\"%s\\\";\\n\" % (fck_var, fck_custom_config), \"%s.BasePath = \\\"%s/\\\";\\n\" % (fck_var,", "@type req: L{modu.web.app.Request} @param folder_path: The folder to save to, relative to C{self.selected_root['root_callback']}", "\" var filemanager = '%s';\\n\" % filemgr_path, \" var connector = '%s';\\n\" %", "= self.allowed_roots[root_key] if not(req.user.is_allowed(self.selected_root['perms'])): app.raise403() if(req.postpath and req.postpath[0] == 'upload'): self.prepare_quick_upload(req) else: self.prepare_browser(req)", "http://modu.bubblehouse.org # # # See LICENSE for details \"\"\" Contains the FCK Editor", "= fck_value.replace(\"\\r\", r'\\r') fck_value = fck_value.replace('\"', r'\\\"') fck_var = 'fck_%s' % self.name output", "FCKeditor('%s');\\n\" % (fck_var, fck_element_name), \"%s.Config['CustomConfigurationsPath'] = \\\"%s\\\";\\n\" % (fck_var, fck_custom_config), \"%s.BasePath = \\\"%s/\\\";\\n\"", "def 
get_content_type(self, req): \"\"\" @see: L{modu.web.resource.IContent.get_content_type()} \"\"\" return '%s; charset=UTF-8' % self.content_type def", "= [output] def prepare_config_request(self, req): \"\"\" Uses a Cheetah template to serve up", "in self.selected_root): return self.selected_root['root_callback'](req) return self.selected_root['root'] def prepare_quick_upload(self, req): \"\"\" Provides support for", "return frm fck_base_path = req.get_path('assets', 'fckeditor') req.content.report('header', tags.script(type=\"text/javascript\", src=req.get_path('assets', 'fckeditor', 'fckeditor.js'))['']) fck_custom_config =", "return sql.RAW(sql.interp(\"INSTR(%%s, %s)\", [value])) class FCKFileField(define.definition): \"\"\" Select a file from a given", "feature. @param req: The current request @type req: L{modu.web.app.Request} \"\"\" result, filename =", "def get_content(self, req): \"\"\" @see: L{modu.web.resource.IContent.get_content()} \"\"\" if(self.content): return self.content return super(FCKEditorResource, self).get_content(req)", "uploaded_file.close() result = SUCCESS except: import traceback print traceback.print_exc() result = UL_ACCESS_DENIED return", "the Storable. 
\"\"\" implements(editable.IDatatype) def get_element(self, req, style, storable): default_value = getattr(storable, self.get_column_name(),", "= form.FormNode(self.name) if(style == 'listing' or self.get('read_only', False)): if not(default_value): default_value = '(none)'", "= \\\"%s/\\\";\\n\" % (fck_var, fck_base_path), \"%s.Value = \\\"%s\\\";\\n\" % (fck_var, fck_value), \"%s.Width =", "@see: L{modu.web.resource.ITemplate.get_template()} \"\"\" return self.template def get_template_root(self, req, template=None): \"\"\" @see: L{modu.web.resource.ITemplate.get_template()} \"\"\"", "get_search_value(self, value, req, frm): \"\"\" @see: L{modu.editable.define.definition.get_search_value()} \"\"\" value = value.value if(value is", "of FCKEditor, namely the image/file upload and server-side file browser. @ivar selected_root: Details", "upload directory. @type selected_root: dict @ivar content_type: The content type to be returned", "@see: L{modu.web.resource.IContent.get_content()} \"\"\" if(self.content): return self.content return super(FCKEditorResource, self).get_content(req) def get_template(self, req): \"\"\"", "value=default_value) return frm filemgr_path = req.get_path('assets/fckeditor/editor/filemanager/browser/default/browser.html') assets.activate_jquery(req) suffix = tags.input(type=\"button\", value=\"Select...\", id='%s-select-button' %", "is None): template = self.get_template(req) if(template is None): app.raise500(\"No template or content available.\")", "for t in items if t.tag == 'Folder'])] if(not folders_only): file_string = ''.join([str(t)", "folder to create @type new_folder_name: str \"\"\" directory_path = os.path.join(self.get_selected_root(req), folder_path) #prevent shenanigans", "app SUCCESS = 0 CUSTOM_ERROR = 1 UL_RENAME = 201 UL_INVALID_TYPE = 202", "full_path = os.path.join(directory_path, item) finfo = os.stat(full_path) if(stat.S_ISREG(finfo.st_mode)): items.append(tags.Tag('File')(name=item, size=(finfo.st_size // 1024))) 
else:", "result, filename = self.handle_upload(req, folder_path) file_url = self.selected_root['url_callback'](req, folder_path, filename) self.content_type = 'text/html'", "config): config['url_callback'] = self.allowed_roots['__default__']['url_callback'] self.allowed_roots[key] = config def prepare_content(self, req): \"\"\" @see: L{modu.web.resource.IContent.prepare_content()}", "1): parts[len(parts) - 2] += '-%d' % int(time.time()) filename = '.'.join(parts) result =", "400)), \"%s.ToolbarSet = \\\"%s\\\";\\n\" % (fck_var, self.get('toolbar_set', 'Standard')), \"%s.Create();\\n\" % fck_var ]] frm(type=\"markup\",", "= lambda req, *path: req.get_path(*path), ), } for key, config in options.get('allowed_roots', {}).items():", "Provides support for the FCK quick upload feature. @param req: The current request", "from modu.web import resource, app SUCCESS = 0 CUSTOM_ERROR = 1 UL_RENAME =", "Copyright (c) 2006-2010 <NAME> # http://modu.bubblehouse.org # # # See LICENSE for details", "result = SUCCESS except: import traceback print traceback.print_exc() result = UL_ACCESS_DENIED return result,", "to serve up the per-site FCK configuration file. 
@param req: The current request", "''): return None if(self.get('fulltext_search')): return sql.RAW(sql.interp(\"MATCH(%%s) AGAINST (%s)\", [value])) else: return sql.RAW(sql.interp(\"INSTR(%%s, %s)\",", "command_name = get_data.get('Command').value resource_type = get_data.get('Type').value new_folder_name = get_data.get('NewFolderName').value folder_path = get_data.get('CurrentFolder').value if(folder_path", "continue full_path = os.path.join(directory_path, item) finfo = os.stat(full_path) if(stat.S_ISREG(finfo.st_mode)): items.append(tags.Tag('File')(name=item, size=(finfo.st_size // 1024)))", "% self.name, disabled=\"1\")) frm['value'](type=\"hidden\", value=default_value, suffix=suffix, attributes=dict(id='%s-value-field' % self.name)) return frm def update_storable(self,", "@ivar content: In most cases, the content to be returned, although it will", "@see: L{modu.web.resource.IContent.prepare_content()} \"\"\" self.content_type = 'text/html' self.content = None self.template = None if(req.postpath", "\\\"%s\\\";\\n\" % (fck_var, self.get('height', 400)), \"%s.ToolbarSet = \\\"%s\\\";\\n\" % (fck_var, self.get('toolbar_set', 'Standard')), \"%s.Create();\\n\"", "fck_value.replace('\"', r'\\\"') fck_var = 'fck_%s' % self.name output = tags.script(type=\"text/javascript\")[[ \"var %s =", "code to support directory listing. 
@param folder_path: The current folder, relative to C{self.selected_root['root_callback']}", "req: The current request @type req: L{modu.web.app.Request} @param folder_path: The current folder, relative", "implements(editable.IDatatype) def get_element(self, req, style, storable): \"\"\" @see: L{modu.editable.define.definition.get_element()} \"\"\" frm = form.FormNode(self.name)", "if(value is ''): return None if(self.get('fulltext_search')): return sql.RAW(sql.interp(\"MATCH(%%s) AGAINST (%s)\", [value])) else: return", "resource implements the server-side portions of FCKEditor, namely the image/file upload and server-side", "'Standard')), \"%s.Create();\\n\" % fck_var ]] frm(type=\"markup\", value=output) return frm def get_search_value(self, value, req,", "create @type new_folder_name: str \"\"\" directory_path = os.path.join(self.get_selected_root(req), folder_path) #prevent shenanigans new_folder_name =", "L{modu.web.resource.IContent.get_content_type()} \"\"\" return '%s; charset=UTF-8' % self.content_type def get_content(self, req): \"\"\" @see: L{modu.web.resource.IContent.get_content()}", "= fck_value.tostring() else: fck_value = str(fck_value) fck_value = fck_value.replace(\"\\r\\n\", r'\\r\\n') fck_value = fck_value.replace(\"\\n\",", "# # See LICENSE for details \"\"\" Contains the FCK Editor support for", "202 UL_ACCESS_DENIED = 203 FLD_EXISTS = 101 FLD_INVALID_NAME = 102 FLD_ACCESS_DENIED = 103", "'%s';\\n\" % self.get('fck_root', '/fck'), \" var win = window.open(filemanager+'?Connector='+connector+'&Type=Image','fileupload','width=600,height=400');\\n\", \" win.focus();\\n\", \"}\\n\", ]])", "not(default_value): default_value = '(none)' frm(type='label', value=default_value) return frm filemgr_path = req.get_path('assets/fckeditor/editor/filemanager/browser/default/browser.html') assets.activate_jquery(req) suffix", "value='(html content)') return frm if(self.get('read_only', False)): frm(type='label', value=getattr(storable, self.get_column_name(), 
'')) return frm fck_base_path", "req: os.path.join(req.approot, req.app.webroot), url_callback = lambda req, *path: req.get_path(*path), ), } for key,", "'') frm = form.FormNode(self.name) if(style == 'listing' or self.get('read_only', False)): if not(default_value): default_value", "+ elementName + '-value-field');\\n\", \" e.val(value);\\n\", \" e = $('#' + elementName +", "AGAINST (%s)\", [value])) else: return sql.RAW(sql.interp(\"INSTR(%%s, %s)\", [value])) class FCKFileField(define.definition): \"\"\" Select a", "style, storable): \"\"\" @see: L{modu.editable.define.definition.get_element()} \"\"\" frm = form.FormNode(self.name) if(style == 'listing'): frm(type='label',", "'-value-label');\\n\", \" e.val(value);\\n\", \" };\\n\", \" var filemanager = '%s';\\n\" % filemgr_path, \"", "'fckconfig-custom.js'): self.prepare_config_request(req) return if(req.postpath): root_key = req.postpath[0] else: root_key = '__default__' self.selected_root =", "Provides support for file uploads within the server-side browser window. 
@param req: The", "= function(value){\\n\", \" var e = $('#' + elementName + '-value-field');\\n\", \" e.val(value);\\n\",", "\"\"\" result, filename = self.handle_upload(req, self.get_selected_root(req)) file_url = self.selected_root['url_callback'](req, filename) self.content = [str(tags.script(type=\"text/javascript\")[", "= 202 UL_ACCESS_DENIED = 203 FLD_EXISTS = 101 FLD_INVALID_NAME = 102 FLD_ACCESS_DENIED =", "req.get_path(self.get('fck_root', '/fck'), 'fckconfig-custom.js') fck_element_name = '%s-form[%s]' % (storable.get_table(), self.name) # //$value = str_replace(\"'\",", "fck_value = fck_value.replace('\"', r'\\\"') fck_var = 'fck_%s' % self.name output = tags.script(type=\"text/javascript\")[[ \"var", "self.get_column_name(), '')) return frm fck_base_path = req.get_path('assets', 'fckeditor') req.content.report('header', tags.script(type=\"text/javascript\", src=req.get_path('assets', 'fckeditor', 'fckeditor.js'))[''])", "\\\"%s\\\";\\n\" % (fck_var, fck_value), \"%s.Width = \\\"%s\\\";\\n\" % (fck_var, self.get('width', 600)), \"%s.Height =", "$('#' + elementName + '-value-field');\\n\", \" e.val(value);\\n\", \" e = $('#' + elementName", "\" var connector = '%s';\\n\" % self.get('fck_root', '/fck'), \" var win = window.open(filemanager+'?Connector='+connector+'&Type=Image','fileupload','width=600,height=400');\\n\",", "current request @type req: L{modu.web.app.Request} \"\"\" result, filename = self.handle_upload(req, self.get_selected_root(req)) file_url =", "return output = '<?xml version=\"1.0\" encoding=\"utf-8\" ?>' output += tags.Tag('Connector')(command=command_name, resourceType=resource_type)[str(content)] self.content_type =", "= fileitem.file.read(65536) uploaded_file.close() result = SUCCESS except: import traceback print traceback.print_exc() result =", "else: fck_value = str(fck_value) fck_value = fck_value.replace(\"\\r\\n\", r'\\r\\n') fck_value = fck_value.replace(\"\\n\", r'\\n') fck_value", "self.name) # //$value = 
str_replace(\"'\", '&apos;', $value); fck_value = getattr(storable, self.get_column_name(), '') if(fck_value", "fck_value), \"%s.Width = \\\"%s\\\";\\n\" % (fck_var, self.get('width', 600)), \"%s.Height = \\\"%s\\\";\\n\" % (fck_var,", "L{modu.editable.define.definition.get_search_value()} \"\"\" value = value.value if(value is ''): return None if(self.get('fulltext_search')): return sql.RAW(sql.interp(\"MATCH(%%s)", "folder_path, filename) self.content_type = 'text/html' self.content = [str(tags.script(type=\"text/javascript\")[ \"window.parent.frames['frmUpload'].OnUploadCompleted(%s, '%s');\\n\" % (result, filename)", "= \\\"%s\\\";\\n\" % (fck_var, fck_value), \"%s.Width = \\\"%s\\\";\\n\" % (fck_var, self.get('width', 600)), \"%s.Height", "- 2] += '-%d' % int(time.time()) filename = '.'.join(parts) result = UL_RENAME else:", "= fileitem.file.read(65536) while(bytes): uploaded_file.write(bytes) bytes = fileitem.file.read(65536) uploaded_file.close() result = SUCCESS except: import", "self.content = None self.template = None if(req.postpath and req.postpath[0] == 'fckconfig-custom.js'): self.prepare_config_request(req) return", "(fck_var, self.get('toolbar_set', 'Standard')), \"%s.Create();\\n\" % fck_var ]] frm(type=\"markup\", value=output) return frm def get_search_value(self,", "else: return sql.RAW(sql.interp(\"INSTR(%%s, %s)\", [value])) class FCKFileField(define.definition): \"\"\" Select a file from a", "FCK server-side file browser. @param req: The current request @type req: L{modu.web.app.Request} \"\"\"", "browser window. 
@param req: The current request @type req: L{modu.web.app.Request} @param folder_path: The", "\"window.parent.frames['frmUpload'].OnUploadCompleted(%s, '%s');\\n\" % (result, filename) ])] def handle_upload(self, req, folder_path): \"\"\" Pulls upload", "req: L{modu.web.app.Request} \"\"\" data = req.data if(req['REQUEST_METHOD'] == 'POST'): get_data = form.parse_query_string(req) else:", "if(style == 'listing' or self.get('read_only', False)): if not(default_value): default_value = '(none)' frm(type='label', value=default_value)", "size=(finfo.st_size // 1024))) else: items.append(tags.Tag('Folder')(name=item)) items.sort(lambda a, b: cmp(a.attributes['name'].lower(), b.attributes['name'].lower())) content = tags.Tag('Folders')[''.join([str(t)", "if(folder_path is None): folder_path = '' elif(folder_path.startswith('/')): folder_path = folder_path[1:] folder_url = self.selected_root['url_callback'](req,", "array from zope.interface import implements from modu import editable, assets, web from modu.persist", "elementName + '-value-field');\\n\", \" e.val(value);\\n\", \" e = $('#' + elementName + '-value-label');\\n\",", "self.selected_root['root_callback'](req) return self.selected_root['root'] def prepare_quick_upload(self, req): \"\"\" Provides support for the FCK quick", "% self.name, onclick=\"getFile('%s')\" % self.name) req.content.report('header', tags.script(type=\"text/javascript\")[[ \"function getFile(elementName){\\n\", \" window.SetUrl = function(value){\\n\",", "if(req.postpath): root_key = req.postpath[0] else: root_key = '__default__' self.selected_root = self.allowed_roots[root_key] if not(req.user.is_allowed(self.selected_root['perms'])):", "True, only list folders @type req: bool \"\"\" items = [] directory_path =", "content to be returned, although it will be None when using the template", "self.prepare_browser(req) def get_content_type(self, req): \"\"\" @see: L{modu.web.resource.IContent.get_content_type()} \"\"\" return '%s; charset=UTF-8' % 
self.content_type", "self.content_type def get_content(self, req): \"\"\" @see: L{modu.web.resource.IContent.get_content()} \"\"\" if(self.content): return self.content return super(FCKEditorResource,", "\"\"\" items = [] directory_path = os.path.join(self.get_selected_root(req), folder_path) for item in os.listdir(directory_path): if(item.startswith('.')):", "content += self.get_directory_items(req, folder_path, True) elif(command_name == 'GetFoldersAndFiles'): content += self.get_directory_items(req, folder_path, False)", "per-site FCK configuration file. @param req: The current request @type req: L{modu.web.app.Request} \"\"\"", "\"%s.Config['CustomConfigurationsPath'] = \\\"%s\\\";\\n\" % (fck_var, fck_custom_config), \"%s.BasePath = \\\"%s/\\\";\\n\" % (fck_var, fck_base_path), \"%s.Value", "Provides server-side support for FCKEditor. This resource implements the server-side portions of FCKEditor,", "output += tags.Tag('Connector')(command=command_name, resourceType=resource_type)[str(content)] self.content_type = 'text/xml' self.content = [output] def prepare_config_request(self, req):", "The name of the folder to create @type new_folder_name: str \"\"\" directory_path =", "False)): if not(default_value): default_value = '(none)' frm(type='label', value=default_value) return frm filemgr_path = req.get_path('assets/fckeditor/editor/filemanager/browser/default/browser.html')", "or content available.\") return admin_resource.select_template_root(req, template) def get_selected_root(self, req): if('root_callback' in self.selected_root): return", "102 FLD_ACCESS_DENIED = 103 FLD_UNKNOWN_ERROR = 110 class FCKEditorField(define.definition): \"\"\" A field type", "the FCK server-side file browser. 
@param req: The current request @type req: L{modu.web.app.Request}", "config['perms'] = self.allowed_roots['__default__']['perms'] if('url_callback' not in config): config['url_callback'] = self.allowed_roots['__default__']['url_callback'] self.allowed_roots[key] = config", "the given folder. @param req: The current request @type req: L{modu.web.app.Request} @param folder_path:", "= 'access admin', root_callback = lambda req: os.path.join(req.approot, req.app.webroot), url_callback = lambda req,", "FCKFileField(define.definition): \"\"\" Select a file from a given directory and save its path", "[output] def prepare_config_request(self, req): \"\"\" Uses a Cheetah template to serve up the", "'.'.join(parts) result = UL_RENAME else: result = UL_INVALID_TYPE if(result != UL_INVALID_TYPE): try: uploaded_file", "bytes = fileitem.file.read(65536) while(bytes): uploaded_file.write(bytes) bytes = fileitem.file.read(65536) uploaded_file.close() result = SUCCESS except:", "= 'text/html' self.content = None self.template = None if(req.postpath and req.postpath[0] == 'fckconfig-custom.js'):", "= UL_RENAME else: result = UL_INVALID_TYPE if(result != UL_INVALID_TYPE): try: uploaded_file = open(destination_path,", "FLD_UNKNOWN_ERROR = 110 class FCKEditorField(define.definition): \"\"\" A field type that displays the FCK", "self.name output = tags.script(type=\"text/javascript\")[[ \"var %s = new FCKeditor('%s');\\n\" % (fck_var, fck_element_name), \"%s.Config['CustomConfigurationsPath']", "get_element(self, req, style, storable): default_value = getattr(storable, self.get_column_name(), '') frm = form.FormNode(self.name) if(style", "file. 
@type content: str \"\"\" def __init__(self, **options): self.allowed_roots = { '__default__' :", "self.allowed_roots['__default__']['perms'] if('url_callback' not in config): config['url_callback'] = self.allowed_roots['__default__']['url_callback'] self.allowed_roots[key] = config def prepare_content(self,", "req): \"\"\" Uses a Cheetah template to serve up the per-site FCK configuration", "% (fck_var, fck_value), \"%s.Width = \\\"%s\\\";\\n\" % (fck_var, self.get('width', 600)), \"%s.Height = \\\"%s\\\";\\n\"", "assets.activate_jquery(req) suffix = tags.input(type=\"button\", value=\"Select...\", id='%s-select-button' % self.name, onclick=\"getFile('%s')\" % self.name) req.content.report('header', tags.script(type=\"text/javascript\")[[", "code to support new folder creation. @param folder_path: The current folder, relative to", "[str(tags.script(type=\"text/javascript\")[ \"window.parent.frames['frmUpload'].OnUploadCompleted(%s, '%s');\\n\" % (result, filename) ])] def handle_upload(self, req, folder_path): \"\"\" Pulls", "@param folder_path: The current folder, relative to C{self.selected_root['root_callback']} @type folder_path: str @param folders_only:", "a, b: cmp(a.attributes['name'].lower(), b.attributes['name'].lower())) content = tags.Tag('Folders')[''.join([str(t) for t in items if t.tag", "\"\"\" Provides support for file uploads within the server-side browser window. 
@param req:", "self.content_type = 'text/html' self.content = None self.template = None if(req.postpath and req.postpath[0] ==", "self.content = [output] def prepare_config_request(self, req): \"\"\" Uses a Cheetah template to serve", "False)): frm(type='label', value=getattr(storable, self.get_column_name(), '')) return frm fck_base_path = req.get_path('assets', 'fckeditor') req.content.report('header', tags.script(type=\"text/javascript\",", "# See LICENSE for details \"\"\" Contains the FCK Editor support for modu.editable.", "self.content = [str(tags.script(type=\"text/javascript\")[ \"window.parent.OnUploadCompleted(%s, '%s', '%s', '');\\n\" % (result, file_url, filename) ])] def", "get_data.get('Type').value new_folder_name = get_data.get('NewFolderName').value folder_path = get_data.get('CurrentFolder').value if(folder_path is None): folder_path = ''", "current folder, relative to C{self.selected_root['root_callback']} @type folder_path: str @param new_folder_name: The name of", "return content def create_folder(self, req, folder_path, new_folder_name): \"\"\" Used by browser code to", "= filename.split('.') if(len(parts) > 1): parts[len(parts) - 2] += '-%d' % int(time.time()) filename", "), } for key, config in options.get('allowed_roots', {}).items(): if('perms' not in config): config['perms']", "tags.Tag('CurrentFolder')(path=folder_path, url=folder_url) if(command_name == 'GetFolders'): content += self.get_directory_items(req, folder_path, True) elif(command_name == 'GetFoldersAndFiles'):", "if(not folders_only): file_string = ''.join([str(t) for t in items if t.tag == 'File'])", "the folder to create @type new_folder_name: str \"\"\" directory_path = os.path.join(self.get_selected_root(req), folder_path) #prevent", "self.prepare_quick_upload(req) else: self.prepare_browser(req) def get_content_type(self, req): \"\"\" @see: L{modu.web.resource.IContent.get_content_type()} \"\"\" return '%s; charset=UTF-8'", "Select a file from a given directory and save 
its path to the", "filename = self.handle_upload(req, folder_path) file_url = self.selected_root['url_callback'](req, folder_path, filename) self.content_type = 'text/html' self.content", "src=req.get_path('assets', 'fckeditor', 'fckeditor.js'))['']) fck_custom_config = req.get_path(self.get('fck_root', '/fck'), 'fckconfig-custom.js') fck_element_name = '%s-form[%s]' % (storable.get_table(),", "def __init__(self, **options): self.allowed_roots = { '__default__' : dict( perms = 'access admin',", "\"\"\" implements(editable.IDatatype) def get_element(self, req, style, storable): \"\"\" @see: L{modu.editable.define.definition.get_element()} \"\"\" frm =", "= window.open(filemanager+'?Connector='+connector+'&Type=Image','fileupload','width=600,height=400');\\n\", \" win.focus();\\n\", \"}\\n\", ]]) frm['label'](type=\"textfield\", value=default_value, attributes=dict(id='%s-value-label' % self.name, disabled=\"1\")) frm['value'](type=\"hidden\",", "'text/html' self.content = [str(tags.script(type=\"text/javascript\")[ \"window.parent.frames['frmUpload'].OnUploadCompleted(%s, '%s');\\n\" % (result, filename) ])] def handle_upload(self, req,", "req): \"\"\" @see: L{modu.web.resource.ITemplate.get_template()} \"\"\" return self.template def get_template_root(self, req, template=None): \"\"\" @see:", "= '' elif(folder_path.startswith('/')): folder_path = folder_path[1:] folder_url = self.selected_root['url_callback'](req, folder_path) content = tags.Tag('CurrentFolder')(path=folder_path,", "@param folders_only: If True, only list folders @type req: bool \"\"\" items =", "% (fck_var, fck_element_name), \"%s.Config['CustomConfigurationsPath'] = \\\"%s\\\";\\n\" % (fck_var, fck_custom_config), \"%s.BasePath = \\\"%s/\\\";\\n\" %", "\"\"\" Provides server-side support for FCKEditor. 
This resource implements the server-side portions of", "\"\"\" import os, os.path, time, stat, shutil, array from zope.interface import implements from", "form.FormNode(self.name) if(style == 'listing'): frm(type='label', value='(html content)') return frm if(self.get('read_only', False)): frm(type='label', value=getattr(storable,", "frm def get_search_value(self, value, req, frm): \"\"\" @see: L{modu.editable.define.definition.get_search_value()} \"\"\" value = value.value", "None): app.raise500(\"No template or content available.\") return admin_resource.select_template_root(req, template) def get_selected_root(self, req): if('root_callback'", "(c) 2006-2010 <NAME> # http://modu.bubblehouse.org # # # See LICENSE for details \"\"\"", "suffix=suffix, attributes=dict(id='%s-value-field' % self.name)) return frm def update_storable(self, req, frm, storable): form_name =", "return if(req.postpath): root_key = req.postpath[0] else: root_key = '__default__' self.selected_root = self.allowed_roots[root_key] if", "the FCK quick upload feature. 
@param req: The current request @type req: L{modu.web.app.Request}", "self.content_type = 'text/javascript' self.template = 'fckconfig-custom.js.tmpl' def get_directory_items(self, req, folder_path, folders_only): \"\"\" Used", "e.val(value);\\n\", \" };\\n\", \" var filemanager = '%s';\\n\" % filemgr_path, \" var connector", "= tags.script(type=\"text/javascript\")[[ \"var %s = new FCKeditor('%s');\\n\" % (fck_var, fck_element_name), \"%s.Config['CustomConfigurationsPath'] = \\\"%s\\\";\\n\"", "\" e = $('#' + elementName + '-value-label');\\n\", \" e.val(value);\\n\", \" };\\n\", \"", "#prevent shenanigans new_folder_name = new_folder_name.split('/').pop() new_path = os.path.join(directory_path, new_folder_name) if(os.access(new_path, os.F_OK)): content =", "Pulls upload data out of the request and saves to the given folder.", "os.path.join(directory_path, new_folder_name) if(os.access(new_path, os.F_OK)): content = tags.Tag('Error')(number=FLD_EXISTS) else: try: os.mkdir(new_path) content = tags.Tag('Error')(number=SUCCESS)", "'FileUpload'): self.file_upload(req, folder_path) return else: return output = '<?xml version=\"1.0\" encoding=\"utf-8\" ?>' output", "else: return output = '<?xml version=\"1.0\" encoding=\"utf-8\" ?>' output += tags.Tag('Connector')(command=command_name, resourceType=resource_type)[str(content)] self.content_type", "modu # Copyright (c) 2006-2010 <NAME> # http://modu.bubblehouse.org # # # See LICENSE", "'File']) if(file_string): content += tags.Tag('Files')[file_string] return content def create_folder(self, req, folder_path, new_folder_name): \"\"\"", "@type content: str \"\"\" def __init__(self, **options): self.allowed_roots = { '__default__' : dict(", "Used by browser code to support new folder creation. 
@param folder_path: The current", "UL_RENAME else: result = UL_INVALID_TYPE if(result != UL_INVALID_TYPE): try: uploaded_file = open(destination_path, 'w')", "(%s)\", [value])) else: return sql.RAW(sql.interp(\"INSTR(%%s, %s)\", [value])) class FCKFileField(define.definition): \"\"\" Select a file", "if(isinstance(fck_value, array.array)): fck_value = fck_value.tostring() else: fck_value = str(fck_value) fck_value = fck_value.replace(\"\\r\\n\", r'\\r\\n')", "of the request and saves to the given folder. @param req: The current", "fck_element_name), \"%s.Config['CustomConfigurationsPath'] = \\\"%s\\\";\\n\" % (fck_var, fck_custom_config), \"%s.BasePath = \\\"%s/\\\";\\n\" % (fck_var, fck_base_path),", "content available.\") return admin_resource.select_template_root(req, template) def get_selected_root(self, req): if('root_callback' in self.selected_root): return self.selected_root['root_callback'](req)", "= folder_path[1:] folder_url = self.selected_root['url_callback'](req, folder_path) content = tags.Tag('CurrentFolder')(path=folder_path, url=folder_url) if(command_name == 'GetFolders'):", "= fileitem.filename destination_path = os.path.join(self.get_selected_root(req), folder_path, filename) if(os.access(destination_path, os.F_OK)): parts = filename.split('.') if(len(parts)", "= [str(tags.script(type=\"text/javascript\")[ \"window.parent.OnUploadCompleted(%s, '%s', '%s', '');\\n\" % (result, file_url, filename) ])] def prepare_browser(self,", "= tags.Tag('CurrentFolder')(path=folder_path, url=folder_url) if(command_name == 'GetFolders'): content += self.get_directory_items(req, folder_path, True) elif(command_name ==", "to C{self.selected_root['root_callback']} @type folder_path: str \"\"\" result, filename = self.handle_upload(req, folder_path) file_url =", "else: result = UL_INVALID_TYPE if(result != UL_INVALID_TYPE): try: uploaded_file = open(destination_path, 'w') bytes", "return frm def get_search_value(self, value, req, frm): \"\"\" @see: 
L{modu.editable.define.definition.get_search_value()} \"\"\" value =", "fck_value.replace(\"\\r\", r'\\r') fck_value = fck_value.replace('\"', r'\\\"') fck_var = 'fck_%s' % self.name output =", "= str_replace(\"'\", '&apos;', $value); fck_value = getattr(storable, self.get_column_name(), '') if(fck_value is None): fck_value", "content: In most cases, the content to be returned, although it will be", "req): \"\"\" @see: L{modu.web.resource.IContent.prepare_content()} \"\"\" self.content_type = 'text/html' self.content = None self.template =", "None self.template = None if(req.postpath and req.postpath[0] == 'fckconfig-custom.js'): self.prepare_config_request(req) return if(req.postpath): root_key", "'<?xml version=\"1.0\" encoding=\"utf-8\" ?>' output += tags.Tag('Connector')(command=command_name, resourceType=resource_type)[str(content)] self.content_type = 'text/xml' self.content =", "in os.listdir(directory_path): if(item.startswith('.')): continue full_path = os.path.join(directory_path, item) finfo = os.stat(full_path) if(stat.S_ISREG(finfo.st_mode)): items.append(tags.Tag('File')(name=item,", "value=output) return frm def get_search_value(self, value, req, frm): \"\"\" @see: L{modu.editable.define.definition.get_search_value()} \"\"\" value", "disabled=\"1\")) frm['value'](type=\"hidden\", value=default_value, suffix=suffix, attributes=dict(id='%s-value-field' % self.name)) return frm def update_storable(self, req, frm,", "+= self.get_directory_items(req, folder_path, True) elif(command_name == 'GetFoldersAndFiles'): content += self.get_directory_items(req, folder_path, False) elif(command_name", "tags.Tag('Connector')(command=command_name, resourceType=resource_type)[str(content)] self.content_type = 'text/xml' self.content = [output] def prepare_config_request(self, req): \"\"\" Uses", "'&apos;', $value); fck_value = getattr(storable, self.get_column_name(), '') if(fck_value is None): fck_value = ''", "file upload directory. 
@type selected_root: dict @ivar content_type: The content type to be", "if(self.name in form_data): setattr(storable, self.get_column_name(), form_data[self.name]['value'].value) return True class FCKEditorResource(resource.CheetahTemplateResource): \"\"\" Provides server-side", "value = value.value if(value is ''): return None if(self.get('fulltext_search')): return sql.RAW(sql.interp(\"MATCH(%%s) AGAINST (%s)\",", "= self.handle_upload(req, self.get_selected_root(req)) file_url = self.selected_root['url_callback'](req, filename) self.content = [str(tags.script(type=\"text/javascript\")[ \"window.parent.OnUploadCompleted(%s, '%s', '%s',", "content = tags.Tag('Folders')[''.join([str(t) for t in items if t.tag == 'Folder'])] if(not folders_only):", "is None): fck_value = '' if(isinstance(fck_value, array.array)): fck_value = fck_value.tostring() else: fck_value =", "time, stat, shutil, array from zope.interface import implements from modu import editable, assets,", "os.path.join(req.approot, req.app.webroot), url_callback = lambda req, *path: req.get_path(*path), ), } for key, config", "@see: L{modu.web.resource.IContent.get_content_type()} \"\"\" return '%s; charset=UTF-8' % self.content_type def get_content(self, req): \"\"\" @see:", "self.name, disabled=\"1\")) frm['value'](type=\"hidden\", value=default_value, suffix=suffix, attributes=dict(id='%s-value-field' % self.name)) return frm def update_storable(self, req,", "if t.tag == 'Folder'])] if(not folders_only): file_string = ''.join([str(t) for t in items", "LICENSE for details \"\"\" Contains the FCK Editor support for modu.editable. 
\"\"\" import", "None): template = self.get_template(req) if(template is None): app.raise500(\"No template or content available.\") return", "content += self.create_folder(req, folder_path, new_folder_name) elif(command_name == 'FileUpload'): self.file_upload(req, folder_path) return else: return", "req): \"\"\" @see: L{modu.web.resource.IContent.get_content_type()} \"\"\" return '%s; charset=UTF-8' % self.content_type def get_content(self, req):", "req.postpath[0] else: root_key = '__default__' self.selected_root = self.allowed_roots[root_key] if not(req.user.is_allowed(self.selected_root['perms'])): app.raise403() if(req.postpath and", "\"%s.ToolbarSet = \\\"%s\\\";\\n\" % (fck_var, self.get('toolbar_set', 'Standard')), \"%s.Create();\\n\" % fck_var ]] frm(type=\"markup\", value=output)", "tags.input(type=\"button\", value=\"Select...\", id='%s-select-button' % self.name, onclick=\"getFile('%s')\" % self.name) req.content.report('header', tags.script(type=\"text/javascript\")[[ \"function getFile(elementName){\\n\", \"", "available.\") return admin_resource.select_template_root(req, template) def get_selected_root(self, req): if('root_callback' in self.selected_root): return self.selected_root['root_callback'](req) return", "form.parse_query_string(req) else: get_data = data command_name = get_data.get('Command').value resource_type = get_data.get('Type').value new_folder_name =", "'upload'): self.prepare_quick_upload(req) else: self.prepare_browser(req) def get_content_type(self, req): \"\"\" @see: L{modu.web.resource.IContent.get_content_type()} \"\"\" return '%s;", "= getattr(storable, self.get_column_name(), '') if(fck_value is None): fck_value = '' if(isinstance(fck_value, array.array)): fck_value", "str \"\"\" result, filename = self.handle_upload(req, folder_path) file_url = self.selected_root['url_callback'](req, folder_path, filename) self.content_type", "the FCK Editor support for modu.editable. 
\"\"\" import os, os.path, time, stat, shutil,", "@type content_type: str @ivar content: In most cases, the content to be returned,", "@see: L{modu.editable.define.definition.get_search_value()} \"\"\" value = value.value if(value is ''): return None if(self.get('fulltext_search')): return", "return '%s; charset=UTF-8' % self.content_type def get_content(self, req): \"\"\" @see: L{modu.web.resource.IContent.get_content()} \"\"\" if(self.content):", "req: bool \"\"\" items = [] directory_path = os.path.join(self.get_selected_root(req), folder_path) for item in", "@see: L{modu.web.resource.ITemplate.get_template()} \"\"\" if(template is None): template = self.get_template(req) if(template is None): app.raise500(\"No", "fck_base_path = req.get_path('assets', 'fckeditor') req.content.report('header', tags.script(type=\"text/javascript\", src=req.get_path('assets', 'fckeditor', 'fckeditor.js'))['']) fck_custom_config = req.get_path(self.get('fck_root', '/fck'),", "be None when using the template to generate the FCK config file. 
@type", "form, tags from modu.web import resource, app SUCCESS = 0 CUSTOM_ERROR = 1", "shenanigans new_folder_name = new_folder_name.split('/').pop() new_path = os.path.join(directory_path, new_folder_name) if(os.access(new_path, os.F_OK)): content = tags.Tag('Error')(number=FLD_EXISTS)", "UL_ACCESS_DENIED = 203 FLD_EXISTS = 101 FLD_INVALID_NAME = 102 FLD_ACCESS_DENIED = 103 FLD_UNKNOWN_ERROR", "\"\"\" def __init__(self, **options): self.allowed_roots = { '__default__' : dict( perms = 'access", "self.allowed_roots[key] = config def prepare_content(self, req): \"\"\" @see: L{modu.web.resource.IContent.prepare_content()} \"\"\" self.content_type = 'text/html'", "\"\"\" result = UL_ACCESS_DENIED data = req.data fileitem = data['NewFile'] filename = fileitem.filename", "% (fck_var, self.get('height', 400)), \"%s.ToolbarSet = \\\"%s\\\";\\n\" % (fck_var, self.get('toolbar_set', 'Standard')), \"%s.Create();\\n\" %", "shutil, array from zope.interface import implements from modu import editable, assets, web from", "folder_path) content = tags.Tag('CurrentFolder')(path=folder_path, url=folder_url) if(command_name == 'GetFolders'): content += self.get_directory_items(req, folder_path, True)", "% (fck_var, self.get('toolbar_set', 'Standard')), \"%s.Create();\\n\" % fck_var ]] frm(type=\"markup\", value=output) return frm def", "web from modu.persist import sql from modu.editable import define from modu.editable import resource", "if not(req.user.is_allowed(self.selected_root['perms'])): app.raise403() if(req.postpath and req.postpath[0] == 'upload'): self.prepare_quick_upload(req) else: self.prepare_browser(req) def get_content_type(self,", "\"\"\" Select a file from a given directory and save its path to", "'listing' or self.get('read_only', False)): if not(default_value): default_value = '(none)' frm(type='label', value=default_value) return frm", "import sql from modu.editable import define from modu.editable import resource as admin_resource from", 
"req.get_path('assets/fckeditor/editor/filemanager/browser/default/browser.html') assets.activate_jquery(req) suffix = tags.input(type=\"button\", value=\"Select...\", id='%s-select-button' % self.name, onclick=\"getFile('%s')\" % self.name) req.content.report('header',", "in req.data): form_data = req.data[form_name] if(self.name in form_data): setattr(storable, self.get_column_name(), form_data[self.name]['value'].value) return True", "L{modu.web.resource.ITemplate.get_template()} \"\"\" if(template is None): template = self.get_template(req) if(template is None): app.raise500(\"No template", "folder_path = '' elif(folder_path.startswith('/')): folder_path = folder_path[1:] folder_url = self.selected_root['url_callback'](req, folder_path) content =", "self.get('width', 600)), \"%s.Height = \\\"%s\\\";\\n\" % (fck_var, self.get('height', 400)), \"%s.ToolbarSet = \\\"%s\\\";\\n\" %", "== 'GetFolders'): content += self.get_directory_items(req, folder_path, True) elif(command_name == 'GetFoldersAndFiles'): content += self.get_directory_items(req,", "% int(time.time()) filename = '.'.join(parts) result = UL_RENAME else: result = UL_INVALID_TYPE if(result", "parts = filename.split('.') if(len(parts) > 1): parts[len(parts) - 2] += '-%d' % int(time.time())", "!= UL_INVALID_TYPE): try: uploaded_file = open(destination_path, 'w') bytes = fileitem.file.read(65536) while(bytes): uploaded_file.write(bytes) bytes", "= tags.Tag('Folders')[''.join([str(t) for t in items if t.tag == 'Folder'])] if(not folders_only): file_string", "items if t.tag == 'File']) if(file_string): content += tags.Tag('Files')[file_string] return content def create_folder(self,", "req, template=None): \"\"\" @see: L{modu.web.resource.ITemplate.get_template()} \"\"\" if(template is None): template = self.get_template(req) if(template", "options.get('allowed_roots', {}).items(): if('perms' not in config): config['perms'] = self.allowed_roots['__default__']['perms'] if('url_callback' not in config):", "name 
of the folder to create @type new_folder_name: str \"\"\" directory_path = os.path.join(self.get_selected_root(req),", "lambda req: os.path.join(req.approot, req.app.webroot), url_callback = lambda req, *path: req.get_path(*path), ), } for", "template to serve up the per-site FCK configuration file. @param req: The current", "type that displays the FCK rich text editor. \"\"\" implements(editable.IDatatype) def get_element(self, req,", "= tags.input(type=\"button\", value=\"Select...\", id='%s-select-button' % self.name, onclick=\"getFile('%s')\" % self.name) req.content.report('header', tags.script(type=\"text/javascript\")[[ \"function getFile(elementName){\\n\",", "= 'fck_%s' % self.name output = tags.script(type=\"text/javascript\")[[ \"var %s = new FCKeditor('%s');\\n\" %", "str \"\"\" directory_path = os.path.join(self.get_selected_root(req), folder_path) #prevent shenanigans new_folder_name = new_folder_name.split('/').pop() new_path =", "else: try: os.mkdir(new_path) content = tags.Tag('Error')(number=SUCCESS) except: content = tags.Tag('Error')(number=FLD_UNKNOWN_ERROR) return content def", "given folder. @param req: The current request @type req: L{modu.web.app.Request} @param folder_path: The", "content = tags.Tag('CurrentFolder')(path=folder_path, url=folder_url) if(command_name == 'GetFolders'): content += self.get_directory_items(req, folder_path, True) elif(command_name", "'access admin', root_callback = lambda req: os.path.join(req.approot, req.app.webroot), url_callback = lambda req, *path:", "support directory listing. 
@param folder_path: The current folder, relative to C{self.selected_root['root_callback']} @type folder_path:", "= os.path.join(self.get_selected_root(req), folder_path) for item in os.listdir(directory_path): if(item.startswith('.')): continue full_path = os.path.join(directory_path, item)", "fck_value.replace(\"\\n\", r'\\n') fck_value = fck_value.replace(\"\\r\", r'\\r') fck_value = fck_value.replace('\"', r'\\\"') fck_var = 'fck_%s'", "@param folder_path: The current folder, relative to C{self.selected_root['root_callback']} @type folder_path: str @param new_folder_name:", "FCKEditorResource(resource.CheetahTemplateResource): \"\"\" Provides server-side support for FCKEditor. This resource implements the server-side portions", "folder_path: str \"\"\" result, filename = self.handle_upload(req, folder_path) file_url = self.selected_root['url_callback'](req, folder_path, filename)", "= 103 FLD_UNKNOWN_ERROR = 110 class FCKEditorField(define.definition): \"\"\" A field type that displays", "= os.path.join(self.get_selected_root(req), folder_path, filename) if(os.access(destination_path, os.F_OK)): parts = filename.split('.') if(len(parts) > 1): parts[len(parts)", "req.get_path('assets', 'fckeditor') req.content.report('header', tags.script(type=\"text/javascript\", src=req.get_path('assets', 'fckeditor', 'fckeditor.js'))['']) fck_custom_config = req.get_path(self.get('fck_root', '/fck'), 'fckconfig-custom.js') fck_element_name", "tags.Tag('Folders')[''.join([str(t) for t in items if t.tag == 'Folder'])] if(not folders_only): file_string =", "new_folder_name = new_folder_name.split('/').pop() new_path = os.path.join(directory_path, new_folder_name) if(os.access(new_path, os.F_OK)): content = tags.Tag('Error')(number=FLD_EXISTS) else:", "returned, although it will be None when using the template to generate the", "\"\"\" @see: L{modu.editable.define.definition.get_search_value()} \"\"\" value = value.value if(value is ''): return None 
if(self.get('fulltext_search')):", "= self.allowed_roots['__default__']['perms'] if('url_callback' not in config): config['url_callback'] = self.allowed_roots['__default__']['url_callback'] self.allowed_roots[key] = config def", "[value])) else: return sql.RAW(sql.interp(\"INSTR(%%s, %s)\", [value])) class FCKFileField(define.definition): \"\"\" Select a file from", "203 FLD_EXISTS = 101 FLD_INVALID_NAME = 102 FLD_ACCESS_DENIED = 103 FLD_UNKNOWN_ERROR = 110", "self.get_column_name(), form_data[self.name]['value'].value) return True class FCKEditorResource(resource.CheetahTemplateResource): \"\"\" Provides server-side support for FCKEditor. This", "[str(tags.script(type=\"text/javascript\")[ \"window.parent.OnUploadCompleted(%s, '%s', '%s', '');\\n\" % (result, file_url, filename) ])] def prepare_browser(self, req):", "\"function getFile(elementName){\\n\", \" window.SetUrl = function(value){\\n\", \" var e = $('#' + elementName", "True) elif(command_name == 'GetFoldersAndFiles'): content += self.get_directory_items(req, folder_path, False) elif(command_name == 'CreateFolder'): content", "@param req: The current request @type req: L{modu.web.app.Request} \"\"\" data = req.data if(req['REQUEST_METHOD']", "Uses a Cheetah template to serve up the per-site FCK configuration file. @param", "folder_path, folders_only): \"\"\" Used by browser code to support directory listing. @param folder_path:", "support new folder creation. @param folder_path: The current folder, relative to C{self.selected_root['root_callback']} @type", "server-side file browser. @ivar selected_root: Details for the file upload directory. @type selected_root:", "import os, os.path, time, stat, shutil, array from zope.interface import implements from modu", "to support directory listing. 
@param folder_path: The current folder, relative to C{self.selected_root['root_callback']} @type", "to be returned, although it will be None when using the template to", "style, storable): default_value = getattr(storable, self.get_column_name(), '') frm = form.FormNode(self.name) if(style == 'listing'", "os.path.join(self.get_selected_root(req), folder_path, filename) if(os.access(destination_path, os.F_OK)): parts = filename.split('.') if(len(parts) > 1): parts[len(parts) -", "};\\n\", \" var filemanager = '%s';\\n\" % filemgr_path, \" var connector = '%s';\\n\"", "L{modu.web.resource.IContent.get_content()} \"\"\" if(self.content): return self.content return super(FCKEditorResource, self).get_content(req) def get_template(self, req): \"\"\" @see:", "folder_path: The current folder, relative to C{self.selected_root['root_callback']} @type folder_path: str @param folders_only: If", "server-side browser window. @param req: The current request @type req: L{modu.web.app.Request} @param folder_path:", "\"window.parent.OnUploadCompleted(%s, '%s', '%s', '');\\n\" % (result, file_url, filename) ])] def prepare_browser(self, req): \"\"\"", "new_folder_name: The name of the folder to create @type new_folder_name: str \"\"\" directory_path", "# http://modu.bubblehouse.org # # # See LICENSE for details \"\"\" Contains the FCK", "filename) ])] def handle_upload(self, req, folder_path): \"\"\" Pulls upload data out of the", "\"\"\" A field type that displays the FCK rich text editor. \"\"\" implements(editable.IDatatype)", "def get_element(self, req, style, storable): \"\"\" @see: L{modu.editable.define.definition.get_element()} \"\"\" frm = form.FormNode(self.name) if(style", "support for the FCK server-side file browser. 
@param req: The current request @type", "= 110 class FCKEditorField(define.definition): \"\"\" A field type that displays the FCK rich", "the content to be returned, although it will be None when using the", "prepare_browser(self, req): \"\"\" Provides support for the FCK server-side file browser. @param req:", "t in items if t.tag == 'Folder'])] if(not folders_only): file_string = ''.join([str(t) for", "'fckconfig-custom.js') fck_element_name = '%s-form[%s]' % (storable.get_table(), self.name) # //$value = str_replace(\"'\", '&apos;', $value);", "\"\"\" if(template is None): template = self.get_template(req) if(template is None): app.raise500(\"No template or", "tags.Tag('Error')(number=FLD_UNKNOWN_ERROR) return content def file_upload(self, req, folder_path): \"\"\" Provides support for file uploads", "folder to save to, relative to C{self.selected_root['root_callback']} @type folder_path: str \"\"\" result =", "self.selected_root = self.allowed_roots[root_key] if not(req.user.is_allowed(self.selected_root['perms'])): app.raise403() if(req.postpath and req.postpath[0] == 'upload'): self.prepare_quick_upload(req) else:", "tags.script(type=\"text/javascript\")[[ \"function getFile(elementName){\\n\", \" window.SetUrl = function(value){\\n\", \" var e = $('#' +", "'%s; charset=UTF-8' % self.content_type def get_content(self, req): \"\"\" @see: L{modu.web.resource.IContent.get_content()} \"\"\" if(self.content): return", "elif(folder_path.startswith('/')): folder_path = folder_path[1:] folder_url = self.selected_root['url_callback'](req, folder_path) content = tags.Tag('CurrentFolder')(path=folder_path, url=folder_url) if(command_name", "os.path.join(directory_path, item) finfo = os.stat(full_path) if(stat.S_ISREG(finfo.st_mode)): items.append(tags.Tag('File')(name=item, size=(finfo.st_size // 1024))) else: items.append(tags.Tag('Folder')(name=item)) items.sort(lambda", "cmp(a.attributes['name'].lower(), b.attributes['name'].lower())) content = 
tags.Tag('Folders')[''.join([str(t) for t in items if t.tag == 'Folder'])]", "FCKEditorField(define.definition): \"\"\" A field type that displays the FCK rich text editor. \"\"\"", "self.handle_upload(req, self.get_selected_root(req)) file_url = self.selected_root['url_callback'](req, filename) self.content = [str(tags.script(type=\"text/javascript\")[ \"window.parent.OnUploadCompleted(%s, '%s', '%s', '');\\n\"", "\"\"\" frm = form.FormNode(self.name) if(style == 'listing'): frm(type='label', value='(html content)') return frm if(self.get('read_only',", "charset=UTF-8' % self.content_type def get_content(self, req): \"\"\" @see: L{modu.web.resource.IContent.get_content()} \"\"\" if(self.content): return self.content", "the server-side portions of FCKEditor, namely the image/file upload and server-side file browser.", "fck_value = fck_value.replace(\"\\r\\n\", r'\\r\\n') fck_value = fck_value.replace(\"\\n\", r'\\n') fck_value = fck_value.replace(\"\\r\", r'\\r') fck_value", "req): \"\"\" @see: L{modu.web.resource.IContent.get_content()} \"\"\" if(self.content): return self.content return super(FCKEditorResource, self).get_content(req) def get_template(self,", "depending on the particular paths accessed. @type content_type: str @ivar content: In most", "= \\\"%s\\\";\\n\" % (fck_var, self.get('width', 600)), \"%s.Height = \\\"%s\\\";\\n\" % (fck_var, self.get('height', 400)),", "if(req.postpath and req.postpath[0] == 'upload'): self.prepare_quick_upload(req) else: self.prepare_browser(req) def get_content_type(self, req): \"\"\" @see:", "directory. 
@type selected_root: dict @ivar content_type: The content type to be returned by", "to be returned by this resource, which changes depending on the particular paths", "**options): self.allowed_roots = { '__default__' : dict( perms = 'access admin', root_callback =", "current request @type req: L{modu.web.app.Request} \"\"\" data = req.data if(req['REQUEST_METHOD'] == 'POST'): get_data", "L{modu.web.app.Request} @param folder_path: The folder to save to, relative to C{self.selected_root['root_callback']} @type folder_path:", "get_data.get('NewFolderName').value folder_path = get_data.get('CurrentFolder').value if(folder_path is None): folder_path = '' elif(folder_path.startswith('/')): folder_path =", "= req.get_path('assets/fckeditor/editor/filemanager/browser/default/browser.html') assets.activate_jquery(req) suffix = tags.input(type=\"button\", value=\"Select...\", id='%s-select-button' % self.name, onclick=\"getFile('%s')\" % self.name)", "implements(editable.IDatatype) def get_element(self, req, style, storable): default_value = getattr(storable, self.get_column_name(), '') frm =", "self.get_directory_items(req, folder_path, True) elif(command_name == 'GetFoldersAndFiles'): content += self.get_directory_items(req, folder_path, False) elif(command_name ==", "CUSTOM_ERROR = 1 UL_RENAME = 201 UL_INVALID_TYPE = 202 UL_ACCESS_DENIED = 203 FLD_EXISTS", "get_data.get('CurrentFolder').value if(folder_path is None): folder_path = '' elif(folder_path.startswith('/')): folder_path = folder_path[1:] folder_url =", "implements from modu import editable, assets, web from modu.persist import sql from modu.editable", "admin_resource from modu.util import form, tags from modu.web import resource, app SUCCESS =", "selected_root: Details for the file upload directory. 
@type selected_root: dict @ivar content_type: The", "be returned, although it will be None when using the template to generate", "folder, relative to C{self.selected_root['root_callback']} @type folder_path: str @param new_folder_name: The name of the", "new_path = os.path.join(directory_path, new_folder_name) if(os.access(new_path, os.F_OK)): content = tags.Tag('Error')(number=FLD_EXISTS) else: try: os.mkdir(new_path) content", "req.data): form_data = req.data[form_name] if(self.name in form_data): setattr(storable, self.get_column_name(), form_data[self.name]['value'].value) return True class", "\"\"\" Pulls upload data out of the request and saves to the given", "in items if t.tag == 'Folder'])] if(not folders_only): file_string = ''.join([str(t) for t", "output = '<?xml version=\"1.0\" encoding=\"utf-8\" ?>' output += tags.Tag('Connector')(command=command_name, resourceType=resource_type)[str(content)] self.content_type = 'text/xml'", "given directory and save its path to the Storable. \"\"\" implements(editable.IDatatype) def get_element(self,", "\"\"\" @see: L{modu.web.resource.IContent.prepare_content()} \"\"\" self.content_type = 'text/html' self.content = None self.template = None", "get_content_type(self, req): \"\"\" @see: L{modu.web.resource.IContent.get_content_type()} \"\"\" return '%s; charset=UTF-8' % self.content_type def get_content(self,", "Used by browser code to support directory listing. 
@param folder_path: The current folder,", "\"\"\" directory_path = os.path.join(self.get_selected_root(req), folder_path) #prevent shenanigans new_folder_name = new_folder_name.split('/').pop() new_path = os.path.join(directory_path,", "@ivar content_type: The content type to be returned by this resource, which changes", "e = $('#' + elementName + '-value-field');\\n\", \" e.val(value);\\n\", \" e = $('#'", "a file from a given directory and save its path to the Storable.", "fck_value.replace(\"\\r\\n\", r'\\r\\n') fck_value = fck_value.replace(\"\\n\", r'\\n') fck_value = fck_value.replace(\"\\r\", r'\\r') fck_value = fck_value.replace('\"',", "except: content = tags.Tag('Error')(number=FLD_UNKNOWN_ERROR) return content def file_upload(self, req, folder_path): \"\"\" Provides support", "value=getattr(storable, self.get_column_name(), '')) return frm fck_base_path = req.get_path('assets', 'fckeditor') req.content.report('header', tags.script(type=\"text/javascript\", src=req.get_path('assets', 'fckeditor',", "particular paths accessed. @type content_type: str @ivar content: In most cases, the content", "The current request @type req: L{modu.web.app.Request} @param folder_path: The current folder, relative to", "])] def handle_upload(self, req, folder_path): \"\"\" Pulls upload data out of the request", "@type folder_path: str \"\"\" result = UL_ACCESS_DENIED data = req.data fileitem = data['NewFile']", "by this resource, which changes depending on the particular paths accessed. 
@type content_type:", "= UL_INVALID_TYPE if(result != UL_INVALID_TYPE): try: uploaded_file = open(destination_path, 'w') bytes = fileitem.file.read(65536)", "be returned by this resource, which changes depending on the particular paths accessed.", "= config def prepare_content(self, req): \"\"\" @see: L{modu.web.resource.IContent.prepare_content()} \"\"\" self.content_type = 'text/html' self.content", "to C{self.selected_root['root_callback']} @type folder_path: str @param new_folder_name: The name of the folder to", "storable.get_table() if(form_name in req.data): form_data = req.data[form_name] if(self.name in form_data): setattr(storable, self.get_column_name(), form_data[self.name]['value'].value)", "if('root_callback' in self.selected_root): return self.selected_root['root_callback'](req) return self.selected_root['root'] def prepare_quick_upload(self, req): \"\"\" Provides support", "sql.RAW(sql.interp(\"INSTR(%%s, %s)\", [value])) class FCKFileField(define.definition): \"\"\" Select a file from a given directory", "get_directory_items(self, req, folder_path, folders_only): \"\"\" Used by browser code to support directory listing.", "url=folder_url) if(command_name == 'GetFolders'): content += self.get_directory_items(req, folder_path, True) elif(command_name == 'GetFoldersAndFiles'): content", "file from a given directory and save its path to the Storable. \"\"\"", "from modu.editable import define from modu.editable import resource as admin_resource from modu.util import", "\"var %s = new FCKeditor('%s');\\n\" % (fck_var, fck_element_name), \"%s.Config['CustomConfigurationsPath'] = \\\"%s\\\";\\n\" % (fck_var,", "although it will be None when using the template to generate the FCK", "when using the template to generate the FCK config file. 
@type content: str", "if(req['REQUEST_METHOD'] == 'POST'): get_data = form.parse_query_string(req) else: get_data = data command_name = get_data.get('Command').value", "r'\\r\\n') fck_value = fck_value.replace(\"\\n\", r'\\n') fck_value = fck_value.replace(\"\\r\", r'\\r') fck_value = fck_value.replace('\"', r'\\\"')", "= ''.join([str(t) for t in items if t.tag == 'File']) if(file_string): content +=", "out of the request and saves to the given folder. @param req: The", "os.F_OK)): parts = filename.split('.') if(len(parts) > 1): parts[len(parts) - 2] += '-%d' %", "__init__(self, **options): self.allowed_roots = { '__default__' : dict( perms = 'access admin', root_callback", "b.attributes['name'].lower())) content = tags.Tag('Folders')[''.join([str(t) for t in items if t.tag == 'Folder'])] if(not", "up the per-site FCK configuration file. @param req: The current request @type req:", "# //$value = str_replace(\"'\", '&apos;', $value); fck_value = getattr(storable, self.get_column_name(), '') if(fck_value is", "= lambda req: os.path.join(req.approot, req.app.webroot), url_callback = lambda req, *path: req.get_path(*path), ), }", "= get_data.get('Command').value resource_type = get_data.get('Type').value new_folder_name = get_data.get('NewFolderName').value folder_path = get_data.get('CurrentFolder').value if(folder_path is", "request @type req: L{modu.web.app.Request} \"\"\" data = req.data if(req['REQUEST_METHOD'] == 'POST'): get_data =", "folder. 
@param req: The current request @type req: L{modu.web.app.Request} @param folder_path: The folder", "'fckeditor') req.content.report('header', tags.script(type=\"text/javascript\", src=req.get_path('assets', 'fckeditor', 'fckeditor.js'))['']) fck_custom_config = req.get_path(self.get('fck_root', '/fck'), 'fckconfig-custom.js') fck_element_name =", "'-value-field');\\n\", \" e.val(value);\\n\", \" e = $('#' + elementName + '-value-label');\\n\", \" e.val(value);\\n\",", "def prepare_browser(self, req): \"\"\" Provides support for the FCK server-side file browser. @param", "self.content_type = 'text/html' self.content = [str(tags.script(type=\"text/javascript\")[ \"window.parent.frames['frmUpload'].OnUploadCompleted(%s, '%s');\\n\" % (result, filename) ])] def", "def prepare_content(self, req): \"\"\" @see: L{modu.web.resource.IContent.prepare_content()} \"\"\" self.content_type = 'text/html' self.content = None", "\\\"%s\\\";\\n\" % (fck_var, fck_custom_config), \"%s.BasePath = \\\"%s/\\\";\\n\" % (fck_var, fck_base_path), \"%s.Value = \\\"%s\\\";\\n\"", "= req.postpath[0] else: root_key = '__default__' self.selected_root = self.allowed_roots[root_key] if not(req.user.is_allowed(self.selected_root['perms'])): app.raise403() if(req.postpath", "frm): \"\"\" @see: L{modu.editable.define.definition.get_search_value()} \"\"\" value = value.value if(value is ''): return None", "filename = '.'.join(parts) result = UL_RENAME else: result = UL_INVALID_TYPE if(result != UL_INVALID_TYPE):", "= 0 CUSTOM_ERROR = 1 UL_RENAME = 201 UL_INVALID_TYPE = 202 UL_ACCESS_DENIED =", "= None self.template = None if(req.postpath and req.postpath[0] == 'fckconfig-custom.js'): self.prepare_config_request(req) return if(req.postpath):", "= req.get_path('assets', 'fckeditor') req.content.report('header', tags.script(type=\"text/javascript\", src=req.get_path('assets', 'fckeditor', 'fckeditor.js'))['']) fck_custom_config = req.get_path(self.get('fck_root', '/fck'), 'fckconfig-custom.js')", 
"default_value = '(none)' frm(type='label', value=default_value) return frm filemgr_path = req.get_path('assets/fckeditor/editor/filemanager/browser/default/browser.html') assets.activate_jquery(req) suffix =", "in config): config['perms'] = self.allowed_roots['__default__']['perms'] if('url_callback' not in config): config['url_callback'] = self.allowed_roots['__default__']['url_callback'] self.allowed_roots[key]", "'%s');\\n\" % (result, filename) ])] def handle_upload(self, req, folder_path): \"\"\" Pulls upload data", "{ '__default__' : dict( perms = 'access admin', root_callback = lambda req: os.path.join(req.approot,", "current folder, relative to C{self.selected_root['root_callback']} @type folder_path: str \"\"\" result, filename = self.handle_upload(req,", "import define from modu.editable import resource as admin_resource from modu.util import form, tags", "if(template is None): template = self.get_template(req) if(template is None): app.raise500(\"No template or content", "'%s';\\n\" % filemgr_path, \" var connector = '%s';\\n\" % self.get('fck_root', '/fck'), \" var", "connector = '%s';\\n\" % self.get('fck_root', '/fck'), \" var win = window.open(filemanager+'?Connector='+connector+'&Type=Image','fileupload','width=600,height=400');\\n\", \" win.focus();\\n\",", "to the Storable. \"\"\" implements(editable.IDatatype) def get_element(self, req, style, storable): default_value = getattr(storable,", "content = tags.Tag('Error')(number=FLD_EXISTS) else: try: os.mkdir(new_path) content = tags.Tag('Error')(number=SUCCESS) except: content = tags.Tag('Error')(number=FLD_UNKNOWN_ERROR)", "for file uploads within the server-side browser window. @param req: The current request", "file browser. @ivar selected_root: Details for the file upload directory. @type selected_root: dict", "support for the FCK quick upload feature. 
@param req: The current request @type", "L{modu.web.resource.ITemplate.get_template()} \"\"\" return self.template def get_template_root(self, req, template=None): \"\"\" @see: L{modu.web.resource.ITemplate.get_template()} \"\"\" if(template", "template or content available.\") return admin_resource.select_template_root(req, template) def get_selected_root(self, req): if('root_callback' in self.selected_root):", "in items if t.tag == 'File']) if(file_string): content += tags.Tag('Files')[file_string] return content def", "C{self.selected_root['root_callback']} @type folder_path: str @param new_folder_name: The name of the folder to create", "for modu.editable. \"\"\" import os, os.path, time, stat, shutil, array from zope.interface import", "self.allowed_roots['__default__']['url_callback'] self.allowed_roots[key] = config def prepare_content(self, req): \"\"\" @see: L{modu.web.resource.IContent.prepare_content()} \"\"\" self.content_type =", "the per-site FCK configuration file. @param req: The current request @type req: L{modu.web.app.Request}", "\"\"\" @see: L{modu.web.resource.IContent.get_content()} \"\"\" if(self.content): return self.content return super(FCKEditorResource, self).get_content(req) def get_template(self, req):", "create_folder(self, req, folder_path, new_folder_name): \"\"\" Used by browser code to support new folder", "req: L{modu.web.app.Request} @param folder_path: The current folder, relative to C{self.selected_root['root_callback']} @type folder_path: str", "var win = window.open(filemanager+'?Connector='+connector+'&Type=Image','fileupload','width=600,height=400');\\n\", \" win.focus();\\n\", \"}\\n\", ]]) frm['label'](type=\"textfield\", value=default_value, attributes=dict(id='%s-value-label' % self.name,", "content_type: str @ivar content: In most cases, the content to be returned, although", "id='%s-select-button' % self.name, onclick=\"getFile('%s')\" % self.name) req.content.report('header', tags.script(type=\"text/javascript\")[[ 
\"function getFile(elementName){\\n\", \" window.SetUrl =", "<NAME> # http://modu.bubblehouse.org # # # See LICENSE for details \"\"\" Contains the", "config in options.get('allowed_roots', {}).items(): if('perms' not in config): config['perms'] = self.allowed_roots['__default__']['perms'] if('url_callback' not", "'GetFoldersAndFiles'): content += self.get_directory_items(req, folder_path, False) elif(command_name == 'CreateFolder'): content += self.create_folder(req, folder_path,", "'GetFolders'): content += self.get_directory_items(req, folder_path, True) elif(command_name == 'GetFoldersAndFiles'): content += self.get_directory_items(req, folder_path,", "return None if(self.get('fulltext_search')): return sql.RAW(sql.interp(\"MATCH(%%s) AGAINST (%s)\", [value])) else: return sql.RAW(sql.interp(\"INSTR(%%s, %s)\", [value]))", "implements the server-side portions of FCKEditor, namely the image/file upload and server-side file", "(fck_var, fck_custom_config), \"%s.BasePath = \\\"%s/\\\";\\n\" % (fck_var, fck_base_path), \"%s.Value = \\\"%s\\\";\\n\" % (fck_var,", "'');\\n\" % (result, file_url, filename) ])] def prepare_browser(self, req): \"\"\" Provides support for", "frm(type='label', value=getattr(storable, self.get_column_name(), '')) return frm fck_base_path = req.get_path('assets', 'fckeditor') req.content.report('header', tags.script(type=\"text/javascript\", src=req.get_path('assets',", "= 102 FLD_ACCESS_DENIED = 103 FLD_UNKNOWN_ERROR = 110 class FCKEditorField(define.definition): \"\"\" A field", "\"\"\" data = req.data if(req['REQUEST_METHOD'] == 'POST'): get_data = form.parse_query_string(req) else: get_data =", "value.value if(value is ''): return None if(self.get('fulltext_search')): return sql.RAW(sql.interp(\"MATCH(%%s) AGAINST (%s)\", [value])) else:", "\"\"\" value = value.value if(value is ''): return None if(self.get('fulltext_search')): return sql.RAW(sql.interp(\"MATCH(%%s) AGAINST", "getattr(storable, self.get_column_name(), '') if(fck_value 
is None): fck_value = '' if(isinstance(fck_value, array.array)): fck_value =", "fck_value.tostring() else: fck_value = str(fck_value) fck_value = fck_value.replace(\"\\r\\n\", r'\\r\\n') fck_value = fck_value.replace(\"\\n\", r'\\n')", "quick upload feature. @param req: The current request @type req: L{modu.web.app.Request} \"\"\" result,", "Editor support for modu.editable. \"\"\" import os, os.path, time, stat, shutil, array from", "serve up the per-site FCK configuration file. @param req: The current request @type", "'text/xml' self.content = [output] def prepare_config_request(self, req): \"\"\" Uses a Cheetah template to", "= '.'.join(parts) result = UL_RENAME else: result = UL_INVALID_TYPE if(result != UL_INVALID_TYPE): try:", "t.tag == 'File']) if(file_string): content += tags.Tag('Files')[file_string] return content def create_folder(self, req, folder_path,", "= '(none)' frm(type='label', value=default_value) return frm filemgr_path = req.get_path('assets/fckeditor/editor/filemanager/browser/default/browser.html') assets.activate_jquery(req) suffix = tags.input(type=\"button\",", "req: The current request @type req: L{modu.web.app.Request} \"\"\" data = req.data if(req['REQUEST_METHOD'] ==", "(fck_var, self.get('height', 400)), \"%s.ToolbarSet = \\\"%s\\\";\\n\" % (fck_var, self.get('toolbar_set', 'Standard')), \"%s.Create();\\n\" % fck_var", "= '<?xml version=\"1.0\" encoding=\"utf-8\" ?>' output += tags.Tag('Connector')(command=command_name, resourceType=resource_type)[str(content)] self.content_type = 'text/xml' self.content", "''.join([str(t) for t in items if t.tag == 'File']) if(file_string): content += tags.Tag('Files')[file_string]", "upload and server-side file browser. 
@ivar selected_root: Details for the file upload directory.", "\" };\\n\", \" var filemanager = '%s';\\n\" % filemgr_path, \" var connector =", "% self.content_type def get_content(self, req): \"\"\" @see: L{modu.web.resource.IContent.get_content()} \"\"\" if(self.content): return self.content return", "get_data = form.parse_query_string(req) else: get_data = data command_name = get_data.get('Command').value resource_type = get_data.get('Type').value", "folder_path: str @param new_folder_name: The name of the folder to create @type new_folder_name:", "within the server-side browser window. @param req: The current request @type req: L{modu.web.app.Request}", "This resource implements the server-side portions of FCKEditor, namely the image/file upload and", "= \\\"%s\\\";\\n\" % (fck_var, self.get('height', 400)), \"%s.ToolbarSet = \\\"%s\\\";\\n\" % (fck_var, self.get('toolbar_set', 'Standard')),", "prepare_quick_upload(self, req): \"\"\" Provides support for the FCK quick upload feature. 
@param req:", "103 FLD_UNKNOWN_ERROR = 110 class FCKEditorField(define.definition): \"\"\" A field type that displays the", "filename) ])] def prepare_browser(self, req): \"\"\" Provides support for the FCK server-side file", "def handle_upload(self, req, folder_path): \"\"\" Pulls upload data out of the request and", "win = window.open(filemanager+'?Connector='+connector+'&Type=Image','fileupload','width=600,height=400');\\n\", \" win.focus();\\n\", \"}\\n\", ]]) frm['label'](type=\"textfield\", value=default_value, attributes=dict(id='%s-value-label' % self.name, disabled=\"1\"))", "directory_path = os.path.join(self.get_selected_root(req), folder_path) for item in os.listdir(directory_path): if(item.startswith('.')): continue full_path = os.path.join(directory_path,", "\" var win = window.open(filemanager+'?Connector='+connector+'&Type=Image','fileupload','width=600,height=400');\\n\", \" win.focus();\\n\", \"}\\n\", ]]) frm['label'](type=\"textfield\", value=default_value, attributes=dict(id='%s-value-label' %", "= fck_value.replace('\"', r'\\\"') fck_var = 'fck_%s' % self.name output = tags.script(type=\"text/javascript\")[[ \"var %s", "# Copyright (c) 2006-2010 <NAME> # http://modu.bubblehouse.org # # # See LICENSE for", "app.raise500(\"No template or content available.\") return admin_resource.select_template_root(req, template) def get_selected_root(self, req): if('root_callback' in", "req: The current request @type req: L{modu.web.app.Request} \"\"\" self.content_type = 'text/javascript' self.template =", "self.get('read_only', False)): if not(default_value): default_value = '(none)' frm(type='label', value=default_value) return frm filemgr_path =", "self.prepare_config_request(req) return if(req.postpath): root_key = req.postpath[0] else: root_key = '__default__' self.selected_root = self.allowed_roots[root_key]", "If True, only list folders @type req: bool \"\"\" items = [] directory_path", "will be None when using the template to generate the FCK config 
file.", "= self.selected_root['url_callback'](req, folder_path) content = tags.Tag('CurrentFolder')(path=folder_path, url=folder_url) if(command_name == 'GetFolders'): content += self.get_directory_items(req,", "else: get_data = data command_name = get_data.get('Command').value resource_type = get_data.get('Type').value new_folder_name = get_data.get('NewFolderName').value", "file browser. @param req: The current request @type req: L{modu.web.app.Request} \"\"\" data =", "os.listdir(directory_path): if(item.startswith('.')): continue full_path = os.path.join(directory_path, item) finfo = os.stat(full_path) if(stat.S_ISREG(finfo.st_mode)): items.append(tags.Tag('File')(name=item, size=(finfo.st_size", "Details for the file upload directory. @type selected_root: dict @ivar content_type: The content", "self.selected_root): return self.selected_root['root_callback'](req) return self.selected_root['root'] def prepare_quick_upload(self, req): \"\"\" Provides support for the", "== 'listing' or self.get('read_only', False)): if not(default_value): default_value = '(none)' frm(type='label', value=default_value) return", "\"\"\" Provides support for the FCK server-side file browser. @param req: The current", "elif(command_name == 'FileUpload'): self.file_upload(req, folder_path) return else: return output = '<?xml version=\"1.0\" encoding=\"utf-8\"", "import editable, assets, web from modu.persist import sql from modu.editable import define from", "modu import editable, assets, web from modu.persist import sql from modu.editable import define", "directory listing. @param folder_path: The current folder, relative to C{self.selected_root['root_callback']} @type folder_path: str", "data out of the request and saves to the given folder. 
@param req:", "setattr(storable, self.get_column_name(), form_data[self.name]['value'].value) return True class FCKEditorResource(resource.CheetahTemplateResource): \"\"\" Provides server-side support for FCKEditor.", "os.mkdir(new_path) content = tags.Tag('Error')(number=SUCCESS) except: content = tags.Tag('Error')(number=FLD_UNKNOWN_ERROR) return content def file_upload(self, req,", "\"%s.Width = \\\"%s\\\";\\n\" % (fck_var, self.get('width', 600)), \"%s.Height = \\\"%s\\\";\\n\" % (fck_var, self.get('height',", "[value])) class FCKFileField(define.definition): \"\"\" Select a file from a given directory and save", "self.allowed_roots[root_key] if not(req.user.is_allowed(self.selected_root['perms'])): app.raise403() if(req.postpath and req.postpath[0] == 'upload'): self.prepare_quick_upload(req) else: self.prepare_browser(req) def", "if(self.get('read_only', False)): frm(type='label', value=getattr(storable, self.get_column_name(), '')) return frm fck_base_path = req.get_path('assets', 'fckeditor') req.content.report('header',", "(fck_var, fck_base_path), \"%s.Value = \\\"%s\\\";\\n\" % (fck_var, fck_value), \"%s.Width = \\\"%s\\\";\\n\" % (fck_var,", "{}).items(): if('perms' not in config): config['perms'] = self.allowed_roots['__default__']['perms'] if('url_callback' not in config): config['url_callback']", "L{modu.web.app.Request} @param folder_path: The current folder, relative to C{self.selected_root['root_callback']} @type folder_path: str \"\"\"", "= data['NewFile'] filename = fileitem.filename destination_path = os.path.join(self.get_selected_root(req), folder_path, filename) if(os.access(destination_path, os.F_OK)): parts", "as admin_resource from modu.util import form, tags from modu.web import resource, app SUCCESS", "if(stat.S_ISREG(finfo.st_mode)): items.append(tags.Tag('File')(name=item, size=(finfo.st_size // 1024))) else: items.append(tags.Tag('Folder')(name=item)) items.sort(lambda a, b: cmp(a.attributes['name'].lower(), 
b.attributes['name'].lower())) content", "= tags.Tag('Error')(number=FLD_UNKNOWN_ERROR) return content def file_upload(self, req, folder_path): \"\"\" Provides support for file", "current folder, relative to C{self.selected_root['root_callback']} @type folder_path: str @param folders_only: If True, only", "modu.editable import resource as admin_resource from modu.util import form, tags from modu.web import", "# modu # Copyright (c) 2006-2010 <NAME> # http://modu.bubblehouse.org # # # See", "folders @type req: bool \"\"\" items = [] directory_path = os.path.join(self.get_selected_root(req), folder_path) for", "% (fck_var, fck_custom_config), \"%s.BasePath = \\\"%s/\\\";\\n\" % (fck_var, fck_base_path), \"%s.Value = \\\"%s\\\";\\n\" %", "\"%s.Height = \\\"%s\\\";\\n\" % (fck_var, self.get('height', 400)), \"%s.ToolbarSet = \\\"%s\\\";\\n\" % (fck_var, self.get('toolbar_set',", "super(FCKEditorResource, self).get_content(req) def get_template(self, req): \"\"\" @see: L{modu.web.resource.ITemplate.get_template()} \"\"\" return self.template def get_template_root(self,", "\"\"\" @see: L{modu.web.resource.ITemplate.get_template()} \"\"\" if(template is None): template = self.get_template(req) if(template is None):", "list folders @type req: bool \"\"\" items = [] directory_path = os.path.join(self.get_selected_root(req), folder_path)", "get_template(self, req): \"\"\" @see: L{modu.web.resource.ITemplate.get_template()} \"\"\" return self.template def get_template_root(self, req, template=None): \"\"\"", "req, folder_path, folders_only): \"\"\" Used by browser code to support directory listing. 
@param", "from zope.interface import implements from modu import editable, assets, web from modu.persist import", "template) def get_selected_root(self, req): if('root_callback' in self.selected_root): return self.selected_root['root_callback'](req) return self.selected_root['root'] def prepare_quick_upload(self,", "window.open(filemanager+'?Connector='+connector+'&Type=Image','fileupload','width=600,height=400');\\n\", \" win.focus();\\n\", \"}\\n\", ]]) frm['label'](type=\"textfield\", value=default_value, attributes=dict(id='%s-value-label' % self.name, disabled=\"1\")) frm['value'](type=\"hidden\", value=default_value,", "var filemanager = '%s';\\n\" % filemgr_path, \" var connector = '%s';\\n\" % self.get('fck_root',", "which changes depending on the particular paths accessed. @type content_type: str @ivar content:", "the FCK config file. @type content: str \"\"\" def __init__(self, **options): self.allowed_roots =", "else: root_key = '__default__' self.selected_root = self.allowed_roots[root_key] if not(req.user.is_allowed(self.selected_root['perms'])): app.raise403() if(req.postpath and req.postpath[0]", "resource as admin_resource from modu.util import form, tags from modu.web import resource, app", "False) elif(command_name == 'CreateFolder'): content += self.create_folder(req, folder_path, new_folder_name) elif(command_name == 'FileUpload'): self.file_upload(req,", "filemanager = '%s';\\n\" % filemgr_path, \" var connector = '%s';\\n\" % self.get('fck_root', '/fck'),", "self.handle_upload(req, folder_path) file_url = self.selected_root['url_callback'](req, folder_path, filename) self.content_type = 'text/html' self.content = [str(tags.script(type=\"text/javascript\")[", "filename) if(os.access(destination_path, os.F_OK)): parts = filename.split('.') if(len(parts) > 1): parts[len(parts) - 2] +=", "%s = new FCKeditor('%s');\\n\" % (fck_var, fck_element_name), \"%s.Config['CustomConfigurationsPath'] = \\\"%s\\\";\\n\" % (fck_var, fck_custom_config),", "to 
save to, relative to C{self.selected_root['root_callback']} @type folder_path: str \"\"\" result = UL_ACCESS_DENIED", "from modu import editable, assets, web from modu.persist import sql from modu.editable import", "tags.Tag('Error')(number=FLD_EXISTS) else: try: os.mkdir(new_path) content = tags.Tag('Error')(number=SUCCESS) except: content = tags.Tag('Error')(number=FLD_UNKNOWN_ERROR) return content", ": dict( perms = 'access admin', root_callback = lambda req: os.path.join(req.approot, req.app.webroot), url_callback", "FCKEditor. This resource implements the server-side portions of FCKEditor, namely the image/file upload", "The content type to be returned by this resource, which changes depending on", "== 'CreateFolder'): content += self.create_folder(req, folder_path, new_folder_name) elif(command_name == 'FileUpload'): self.file_upload(req, folder_path) return", "support for modu.editable. \"\"\" import os, os.path, time, stat, shutil, array from zope.interface", "value, req, frm): \"\"\" @see: L{modu.editable.define.definition.get_search_value()} \"\"\" value = value.value if(value is ''):", "cases, the content to be returned, although it will be None when using", "configuration file. 
@param req: The current request @type req: L{modu.web.app.Request} \"\"\" self.content_type =", "var connector = '%s';\\n\" % self.get('fck_root', '/fck'), \" var win = window.open(filemanager+'?Connector='+connector+'&Type=Image','fileupload','width=600,height=400');\\n\", \"", "return self.template def get_template_root(self, req, template=None): \"\"\" @see: L{modu.web.resource.ITemplate.get_template()} \"\"\" if(template is None):", "@type req: L{modu.web.app.Request} \"\"\" self.content_type = 'text/javascript' self.template = 'fckconfig-custom.js.tmpl' def get_directory_items(self, req,", "\"\"\" self.content_type = 'text/javascript' self.template = 'fckconfig-custom.js.tmpl' def get_directory_items(self, req, folder_path, folders_only): \"\"\"", "folder_path) #prevent shenanigans new_folder_name = new_folder_name.split('/').pop() new_path = os.path.join(directory_path, new_folder_name) if(os.access(new_path, os.F_OK)): content", "% (result, filename) ])] def handle_upload(self, req, folder_path): \"\"\" Pulls upload data out", "is None): folder_path = '' elif(folder_path.startswith('/')): folder_path = folder_path[1:] folder_url = self.selected_root['url_callback'](req, folder_path)", "to C{self.selected_root['root_callback']} @type folder_path: str @param folders_only: If True, only list folders @type", "for FCKEditor. This resource implements the server-side portions of FCKEditor, namely the image/file", "request @type req: L{modu.web.app.Request} \"\"\" result, filename = self.handle_upload(req, self.get_selected_root(req)) file_url = self.selected_root['url_callback'](req,", "browser code to support new folder creation. 
@param folder_path: The current folder, relative", "== 'upload'): self.prepare_quick_upload(req) else: self.prepare_browser(req) def get_content_type(self, req): \"\"\" @see: L{modu.web.resource.IContent.get_content_type()} \"\"\" return", "\" e.val(value);\\n\", \" e = $('#' + elementName + '-value-label');\\n\", \" e.val(value);\\n\", \"", "form.FormNode(self.name) if(style == 'listing' or self.get('read_only', False)): if not(default_value): default_value = '(none)' frm(type='label',", "(result, file_url, filename) ])] def prepare_browser(self, req): \"\"\" Provides support for the FCK", "FLD_INVALID_NAME = 102 FLD_ACCESS_DENIED = 103 FLD_UNKNOWN_ERROR = 110 class FCKEditorField(define.definition): \"\"\" A", "directory and save its path to the Storable. \"\"\" implements(editable.IDatatype) def get_element(self, req,", "editable, assets, web from modu.persist import sql from modu.editable import define from modu.editable", "form_name = '%s-form' % storable.get_table() if(form_name in req.data): form_data = req.data[form_name] if(self.name in", "\"\"\" Provides support for the FCK quick upload feature. @param req: The current", "generate the FCK config file. @type content: str \"\"\" def __init__(self, **options): self.allowed_roots", "= self.allowed_roots['__default__']['url_callback'] self.allowed_roots[key] = config def prepare_content(self, req): \"\"\" @see: L{modu.web.resource.IContent.prepare_content()} \"\"\" self.content_type", "t in items if t.tag == 'File']) if(file_string): content += tags.Tag('Files')[file_string] return content", "folder_path): \"\"\" Pulls upload data out of the request and saves to the", "\"\"\" @see: L{modu.editable.define.definition.get_element()} \"\"\" frm = form.FormNode(self.name) if(style == 'listing'): frm(type='label', value='(html content)')", "portions of FCKEditor, namely the image/file upload and server-side file browser. 
@ivar selected_root:", "returned by this resource, which changes depending on the particular paths accessed. @type", "folder_path: The folder to save to, relative to C{self.selected_root['root_callback']} @type folder_path: str \"\"\"", "'text/html' self.content = None self.template = None if(req.postpath and req.postpath[0] == 'fckconfig-custom.js'): self.prepare_config_request(req)", "root_key = '__default__' self.selected_root = self.allowed_roots[root_key] if not(req.user.is_allowed(self.selected_root['perms'])): app.raise403() if(req.postpath and req.postpath[0] ==", "request @type req: L{modu.web.app.Request} @param folder_path: The folder to save to, relative to", "fileitem.file.read(65536) while(bytes): uploaded_file.write(bytes) bytes = fileitem.file.read(65536) uploaded_file.close() result = SUCCESS except: import traceback", "folder_path: The current folder, relative to C{self.selected_root['root_callback']} @type folder_path: str \"\"\" result, filename", "get_data.get('Command').value resource_type = get_data.get('Type').value new_folder_name = get_data.get('NewFolderName').value folder_path = get_data.get('CurrentFolder').value if(folder_path is None):", "fck_value = getattr(storable, self.get_column_name(), '') if(fck_value is None): fck_value = '' if(isinstance(fck_value, array.array)):", "(result, filename) ])] def handle_upload(self, req, folder_path): \"\"\" Pulls upload data out of", "template=None): \"\"\" @see: L{modu.web.resource.ITemplate.get_template()} \"\"\" if(template is None): template = self.get_template(req) if(template is", "'CreateFolder'): content += self.create_folder(req, folder_path, new_folder_name) elif(command_name == 'FileUpload'): self.file_upload(req, folder_path) return else:", "self.get_selected_root(req)) file_url = self.selected_root['url_callback'](req, filename) self.content = [str(tags.script(type=\"text/javascript\")[ \"window.parent.OnUploadCompleted(%s, '%s', '%s', '');\\n\" %", "elif(command_name == 
'GetFoldersAndFiles'): content += self.get_directory_items(req, folder_path, False) elif(command_name == 'CreateFolder'): content +=", "[] directory_path = os.path.join(self.get_selected_root(req), folder_path) for item in os.listdir(directory_path): if(item.startswith('.')): continue full_path =", "= get_data.get('CurrentFolder').value if(folder_path is None): folder_path = '' elif(folder_path.startswith('/')): folder_path = folder_path[1:] folder_url", "else: items.append(tags.Tag('Folder')(name=item)) items.sort(lambda a, b: cmp(a.attributes['name'].lower(), b.attributes['name'].lower())) content = tags.Tag('Folders')[''.join([str(t) for t in", "r'\\n') fck_value = fck_value.replace(\"\\r\", r'\\r') fck_value = fck_value.replace('\"', r'\\\"') fck_var = 'fck_%s' %", "folder_path: The current folder, relative to C{self.selected_root['root_callback']} @type folder_path: str @param new_folder_name: The", "% self.name) req.content.report('header', tags.script(type=\"text/javascript\")[[ \"function getFile(elementName){\\n\", \" window.SetUrl = function(value){\\n\", \" var e", "= req.data fileitem = data['NewFile'] filename = fileitem.filename destination_path = os.path.join(self.get_selected_root(req), folder_path, filename)", "str(fck_value) fck_value = fck_value.replace(\"\\r\\n\", r'\\r\\n') fck_value = fck_value.replace(\"\\n\", r'\\n') fck_value = fck_value.replace(\"\\r\", r'\\r')", "directory_path = os.path.join(self.get_selected_root(req), folder_path) #prevent shenanigans new_folder_name = new_folder_name.split('/').pop() new_path = os.path.join(directory_path, new_folder_name)", "= self.get_template(req) if(template is None): app.raise500(\"No template or content available.\") return admin_resource.select_template_root(req, template)", "'fckeditor', 'fckeditor.js'))['']) fck_custom_config = req.get_path(self.get('fck_root', '/fck'), 'fckconfig-custom.js') fck_element_name = '%s-form[%s]' % (storable.get_table(), self.name)", "= 
form.parse_query_string(req) else: get_data = data command_name = get_data.get('Command').value resource_type = get_data.get('Type').value new_folder_name", "editor. \"\"\" implements(editable.IDatatype) def get_element(self, req, style, storable): \"\"\" @see: L{modu.editable.define.definition.get_element()} \"\"\" frm", "'/fck'), 'fckconfig-custom.js') fck_element_name = '%s-form[%s]' % (storable.get_table(), self.name) # //$value = str_replace(\"'\", '&apos;',", "return frm filemgr_path = req.get_path('assets/fckeditor/editor/filemanager/browser/default/browser.html') assets.activate_jquery(req) suffix = tags.input(type=\"button\", value=\"Select...\", id='%s-select-button' % self.name,", "update_storable(self, req, frm, storable): form_name = '%s-form' % storable.get_table() if(form_name in req.data): form_data", "None if(self.get('fulltext_search')): return sql.RAW(sql.interp(\"MATCH(%%s) AGAINST (%s)\", [value])) else: return sql.RAW(sql.interp(\"INSTR(%%s, %s)\", [value])) class", "\\\"%s\\\";\\n\" % (fck_var, self.get('width', 600)), \"%s.Height = \\\"%s\\\";\\n\" % (fck_var, self.get('height', 400)), \"%s.ToolbarSet", "form_data): setattr(storable, self.get_column_name(), form_data[self.name]['value'].value) return True class FCKEditorResource(resource.CheetahTemplateResource): \"\"\" Provides server-side support for", "perms = 'access admin', root_callback = lambda req: os.path.join(req.approot, req.app.webroot), url_callback = lambda", "req.postpath[0] == 'fckconfig-custom.js'): self.prepare_config_request(req) return if(req.postpath): root_key = req.postpath[0] else: root_key = '__default__'", "0 CUSTOM_ERROR = 1 UL_RENAME = 201 UL_INVALID_TYPE = 202 UL_ACCESS_DENIED = 203", "value=\"Select...\", id='%s-select-button' % self.name, onclick=\"getFile('%s')\" % self.name) req.content.report('header', tags.script(type=\"text/javascript\")[[ \"function getFile(elementName){\\n\", \" window.SetUrl", "L{modu.web.app.Request} \"\"\" self.content_type = 
'text/javascript' self.template = 'fckconfig-custom.js.tmpl' def get_directory_items(self, req, folder_path, folders_only):", "for item in os.listdir(directory_path): if(item.startswith('.')): continue full_path = os.path.join(directory_path, item) finfo = os.stat(full_path)", "items if t.tag == 'Folder'])] if(not folders_only): file_string = ''.join([str(t) for t in", "is None): app.raise500(\"No template or content available.\") return admin_resource.select_template_root(req, template) def get_selected_root(self, req):", "admin_resource.select_template_root(req, template) def get_selected_root(self, req): if('root_callback' in self.selected_root): return self.selected_root['root_callback'](req) return self.selected_root['root'] def", "= UL_ACCESS_DENIED data = req.data fileitem = data['NewFile'] filename = fileitem.filename destination_path =", "'')) return frm fck_base_path = req.get_path('assets', 'fckeditor') req.content.report('header', tags.script(type=\"text/javascript\", src=req.get_path('assets', 'fckeditor', 'fckeditor.js'))['']) fck_custom_config", "'%s', '%s', '');\\n\" % (result, file_url, filename) ])] def prepare_browser(self, req): \"\"\" Provides", "import form, tags from modu.web import resource, app SUCCESS = 0 CUSTOM_ERROR =", "if(os.access(destination_path, os.F_OK)): parts = filename.split('.') if(len(parts) > 1): parts[len(parts) - 2] += '-%d'", "self).get_content(req) def get_template(self, req): \"\"\" @see: L{modu.web.resource.ITemplate.get_template()} \"\"\" return self.template def get_template_root(self, req,", "% fck_var ]] frm(type=\"markup\", value=output) return frm def get_search_value(self, value, req, frm): \"\"\"", "'/fck'), \" var win = window.open(filemanager+'?Connector='+connector+'&Type=Image','fileupload','width=600,height=400');\\n\", \" win.focus();\\n\", \"}\\n\", ]]) frm['label'](type=\"textfield\", value=default_value, attributes=dict(id='%s-value-label'", "$value); fck_value = getattr(storable, 
self.get_column_name(), '') if(fck_value is None): fck_value = '' if(isinstance(fck_value,", "if(command_name == 'GetFolders'): content += self.get_directory_items(req, folder_path, True) elif(command_name == 'GetFoldersAndFiles'): content +=", "]] frm(type=\"markup\", value=output) return frm def get_search_value(self, value, req, frm): \"\"\" @see: L{modu.editable.define.definition.get_search_value()}", "See LICENSE for details \"\"\" Contains the FCK Editor support for modu.editable. \"\"\"", "= \\\"%s\\\";\\n\" % (fck_var, self.get('toolbar_set', 'Standard')), \"%s.Create();\\n\" % fck_var ]] frm(type=\"markup\", value=output) return", "modu.editable import define from modu.editable import resource as admin_resource from modu.util import form,", "a given directory and save its path to the Storable. \"\"\" implements(editable.IDatatype) def", "listing. @param folder_path: The current folder, relative to C{self.selected_root['root_callback']} @type folder_path: str @param", "config file. @type content: str \"\"\" def __init__(self, **options): self.allowed_roots = { '__default__'", "str @param folders_only: If True, only list folders @type req: bool \"\"\" items", "not in config): config['url_callback'] = self.allowed_roots['__default__']['url_callback'] self.allowed_roots[key] = config def prepare_content(self, req): \"\"\"", "the server-side browser window. 
@param req: The current request @type req: L{modu.web.app.Request} @param", "bytes = fileitem.file.read(65536) uploaded_file.close() result = SUCCESS except: import traceback print traceback.print_exc() result", "% filemgr_path, \" var connector = '%s';\\n\" % self.get('fck_root', '/fck'), \" var win", "dict( perms = 'access admin', root_callback = lambda req: os.path.join(req.approot, req.app.webroot), url_callback =", "fck_value = fck_value.replace(\"\\r\", r'\\r') fck_value = fck_value.replace('\"', r'\\\"') fck_var = 'fck_%s' % self.name", "data = req.data fileitem = data['NewFile'] filename = fileitem.filename destination_path = os.path.join(self.get_selected_root(req), folder_path,", "content += tags.Tag('Files')[file_string] return content def create_folder(self, req, folder_path, new_folder_name): \"\"\" Used by", "prepare_config_request(self, req): \"\"\" Uses a Cheetah template to serve up the per-site FCK", "FCKEditor, namely the image/file upload and server-side file browser. 
@ivar selected_root: Details for", "try: os.mkdir(new_path) content = tags.Tag('Error')(number=SUCCESS) except: content = tags.Tag('Error')(number=FLD_UNKNOWN_ERROR) return content def file_upload(self,", "req, folder_path): \"\"\" Pulls upload data out of the request and saves to", "frm fck_base_path = req.get_path('assets', 'fckeditor') req.content.report('header', tags.script(type=\"text/javascript\", src=req.get_path('assets', 'fckeditor', 'fckeditor.js'))['']) fck_custom_config = req.get_path(self.get('fck_root',", "config['url_callback'] = self.allowed_roots['__default__']['url_callback'] self.allowed_roots[key] = config def prepare_content(self, req): \"\"\" @see: L{modu.web.resource.IContent.prepare_content()} \"\"\"", "% storable.get_table() if(form_name in req.data): form_data = req.data[form_name] if(self.name in form_data): setattr(storable, self.get_column_name(),", "try: uploaded_file = open(destination_path, 'w') bytes = fileitem.file.read(65536) while(bytes): uploaded_file.write(bytes) bytes = fileitem.file.read(65536)", "= open(destination_path, 'w') bytes = fileitem.file.read(65536) while(bytes): uploaded_file.write(bytes) bytes = fileitem.file.read(65536) uploaded_file.close() result", "folder_path[1:] folder_url = self.selected_root['url_callback'](req, folder_path) content = tags.Tag('CurrentFolder')(path=folder_path, url=folder_url) if(command_name == 'GetFolders'): content", "from modu.persist import sql from modu.editable import define from modu.editable import resource as", "FCK config file. 
@type content: str \"\"\" def __init__(self, **options): self.allowed_roots = {", "def get_template_root(self, req, template=None): \"\"\" @see: L{modu.web.resource.ITemplate.get_template()} \"\"\" if(template is None): template =", "file_url, filename) ])] def prepare_browser(self, req): \"\"\" Provides support for the FCK server-side", "def get_directory_items(self, req, folder_path, folders_only): \"\"\" Used by browser code to support directory", "function(value){\\n\", \" var e = $('#' + elementName + '-value-field');\\n\", \" e.val(value);\\n\", \"", "server-side portions of FCKEditor, namely the image/file upload and server-side file browser. @ivar", "@type new_folder_name: str \"\"\" directory_path = os.path.join(self.get_selected_root(req), folder_path) #prevent shenanigans new_folder_name = new_folder_name.split('/').pop()", "= self.selected_root['url_callback'](req, filename) self.content = [str(tags.script(type=\"text/javascript\")[ \"window.parent.OnUploadCompleted(%s, '%s', '%s', '');\\n\" % (result, file_url,", "The current request @type req: L{modu.web.app.Request} @param folder_path: The folder to save to,", "*path: req.get_path(*path), ), } for key, config in options.get('allowed_roots', {}).items(): if('perms' not in", "'fck_%s' % self.name output = tags.script(type=\"text/javascript\")[[ \"var %s = new FCKeditor('%s');\\n\" % (fck_var,", "'(none)' frm(type='label', value=default_value) return frm filemgr_path = req.get_path('assets/fckeditor/editor/filemanager/browser/default/browser.html') assets.activate_jquery(req) suffix = tags.input(type=\"button\", value=\"Select...\",", "2006-2010 <NAME> # http://modu.bubblehouse.org # # # See LICENSE for details \"\"\" Contains", "server-side support for FCKEditor. 
This resource implements the server-side portions of FCKEditor, namely", "finfo = os.stat(full_path) if(stat.S_ISREG(finfo.st_mode)): items.append(tags.Tag('File')(name=item, size=(finfo.st_size // 1024))) else: items.append(tags.Tag('Folder')(name=item)) items.sort(lambda a, b:", "@type folder_path: str @param new_folder_name: The name of the folder to create @type", "req.data[form_name] if(self.name in form_data): setattr(storable, self.get_column_name(), form_data[self.name]['value'].value) return True class FCKEditorResource(resource.CheetahTemplateResource): \"\"\" Provides", "2] += '-%d' % int(time.time()) filename = '.'.join(parts) result = UL_RENAME else: result", "self.get_directory_items(req, folder_path, False) elif(command_name == 'CreateFolder'): content += self.create_folder(req, folder_path, new_folder_name) elif(command_name ==", "creation. @param folder_path: The current folder, relative to C{self.selected_root['root_callback']} @type folder_path: str @param", "folder_path: str \"\"\" result = UL_ACCESS_DENIED data = req.data fileitem = data['NewFile'] filename", "get_content(self, req): \"\"\" @see: L{modu.web.resource.IContent.get_content()} \"\"\" if(self.content): return self.content return super(FCKEditorResource, self).get_content(req) def", "self.template def get_template_root(self, req, template=None): \"\"\" @see: L{modu.web.resource.ITemplate.get_template()} \"\"\" if(template is None): template", "array.array)): fck_value = fck_value.tostring() else: fck_value = str(fck_value) fck_value = fck_value.replace(\"\\r\\n\", r'\\r\\n') fck_value", "@type req: L{modu.web.app.Request} \"\"\" data = req.data if(req['REQUEST_METHOD'] == 'POST'): get_data = form.parse_query_string(req)", "get_template_root(self, req, template=None): \"\"\" @see: L{modu.web.resource.ITemplate.get_template()} \"\"\" if(template is None): template = self.get_template(req)", "Cheetah template to serve up the per-site FCK configuration file. 
@param req: The", "result = UL_INVALID_TYPE if(result != UL_INVALID_TYPE): try: uploaded_file = open(destination_path, 'w') bytes =", "= req.get_path(self.get('fck_root', '/fck'), 'fckconfig-custom.js') fck_element_name = '%s-form[%s]' % (storable.get_table(), self.name) # //$value =", "os.path.join(self.get_selected_root(req), folder_path) #prevent shenanigans new_folder_name = new_folder_name.split('/').pop() new_path = os.path.join(directory_path, new_folder_name) if(os.access(new_path, os.F_OK)):", "'__default__' : dict( perms = 'access admin', root_callback = lambda req: os.path.join(req.approot, req.app.webroot),", "field type that displays the FCK rich text editor. \"\"\" implements(editable.IDatatype) def get_element(self,", "is ''): return None if(self.get('fulltext_search')): return sql.RAW(sql.interp(\"MATCH(%%s) AGAINST (%s)\", [value])) else: return sql.RAW(sql.interp(\"INSTR(%%s,", "template to generate the FCK config file. @type content: str \"\"\" def __init__(self,", "prepare_content(self, req): \"\"\" @see: L{modu.web.resource.IContent.prepare_content()} \"\"\" self.content_type = 'text/html' self.content = None self.template", "get_selected_root(self, req): if('root_callback' in self.selected_root): return self.selected_root['root_callback'](req) return self.selected_root['root'] def prepare_quick_upload(self, req): \"\"\"", "b: cmp(a.attributes['name'].lower(), b.attributes['name'].lower())) content = tags.Tag('Folders')[''.join([str(t) for t in items if t.tag ==", "data['NewFile'] filename = fileitem.filename destination_path = os.path.join(self.get_selected_root(req), folder_path, filename) if(os.access(destination_path, os.F_OK)): parts =", "resource_type = get_data.get('Type').value new_folder_name = get_data.get('NewFolderName').value folder_path = get_data.get('CurrentFolder').value if(folder_path is None): folder_path", "self.create_folder(req, folder_path, new_folder_name) elif(command_name == 'FileUpload'): self.file_upload(req, 
folder_path) return else: return output =", "and server-side file browser. @ivar selected_root: Details for the file upload directory. @type", "?>' output += tags.Tag('Connector')(command=command_name, resourceType=resource_type)[str(content)] self.content_type = 'text/xml' self.content = [output] def prepare_config_request(self,", "config def prepare_content(self, req): \"\"\" @see: L{modu.web.resource.IContent.prepare_content()} \"\"\" self.content_type = 'text/html' self.content =", "FCK Editor support for modu.editable. \"\"\" import os, os.path, time, stat, shutil, array", "In most cases, the content to be returned, although it will be None", "form_data[self.name]['value'].value) return True class FCKEditorResource(resource.CheetahTemplateResource): \"\"\" Provides server-side support for FCKEditor. This resource", "to create @type new_folder_name: str \"\"\" directory_path = os.path.join(self.get_selected_root(req), folder_path) #prevent shenanigans new_folder_name", "= data command_name = get_data.get('Command').value resource_type = get_data.get('Type').value new_folder_name = get_data.get('NewFolderName').value folder_path =", "fck_base_path), \"%s.Value = \\\"%s\\\";\\n\" % (fck_var, fck_value), \"%s.Width = \\\"%s\\\";\\n\" % (fck_var, self.get('width',", "self.name) req.content.report('header', tags.script(type=\"text/javascript\")[[ \"function getFile(elementName){\\n\", \" window.SetUrl = function(value){\\n\", \" var e =", "browser. @ivar selected_root: Details for the file upload directory. 
@type selected_root: dict @ivar", "var e = $('#' + elementName + '-value-field');\\n\", \" e.val(value);\\n\", \" e =", "tags.Tag('Error')(number=SUCCESS) except: content = tags.Tag('Error')(number=FLD_UNKNOWN_ERROR) return content def file_upload(self, req, folder_path): \"\"\" Provides", "= 'text/javascript' self.template = 'fckconfig-custom.js.tmpl' def get_directory_items(self, req, folder_path, folders_only): \"\"\" Used by", "self.content = [str(tags.script(type=\"text/javascript\")[ \"window.parent.frames['frmUpload'].OnUploadCompleted(%s, '%s');\\n\" % (result, filename) ])] def handle_upload(self, req, folder_path):", "= [str(tags.script(type=\"text/javascript\")[ \"window.parent.frames['frmUpload'].OnUploadCompleted(%s, '%s');\\n\" % (result, filename) ])] def handle_upload(self, req, folder_path): \"\"\"", "items.sort(lambda a, b: cmp(a.attributes['name'].lower(), b.attributes['name'].lower())) content = tags.Tag('Folders')[''.join([str(t) for t in items if", "= [] directory_path = os.path.join(self.get_selected_root(req), folder_path) for item in os.listdir(directory_path): if(item.startswith('.')): continue full_path", "if(os.access(new_path, os.F_OK)): content = tags.Tag('Error')(number=FLD_EXISTS) else: try: os.mkdir(new_path) content = tags.Tag('Error')(number=SUCCESS) except: content", "self.get('fck_root', '/fck'), \" var win = window.open(filemanager+'?Connector='+connector+'&Type=Image','fileupload','width=600,height=400');\\n\", \" win.focus();\\n\", \"}\\n\", ]]) frm['label'](type=\"textfield\", value=default_value,", "\" window.SetUrl = function(value){\\n\", \" var e = $('#' + elementName + '-value-field');\\n\",", "else: self.prepare_browser(req) def get_content_type(self, req): \"\"\" @see: L{modu.web.resource.IContent.get_content_type()} \"\"\" return '%s; charset=UTF-8' %", "(fck_var, self.get('width', 600)), \"%s.Height = \\\"%s\\\";\\n\" % (fck_var, self.get('height', 400)), \"%s.ToolbarSet = \\\"%s\\\";\\n\"", "self.content_type = 
'text/xml' self.content = [output] def prepare_config_request(self, req): \"\"\" Uses a Cheetah", "not(req.user.is_allowed(self.selected_root['perms'])): app.raise403() if(req.postpath and req.postpath[0] == 'upload'): self.prepare_quick_upload(req) else: self.prepare_browser(req) def get_content_type(self, req):", "FCK quick upload feature. @param req: The current request @type req: L{modu.web.app.Request} \"\"\"", "file uploads within the server-side browser window. @param req: The current request @type", "1 UL_RENAME = 201 UL_INVALID_TYPE = 202 UL_ACCESS_DENIED = 203 FLD_EXISTS = 101", "self.selected_root['url_callback'](req, folder_path, filename) self.content_type = 'text/html' self.content = [str(tags.script(type=\"text/javascript\")[ \"window.parent.frames['frmUpload'].OnUploadCompleted(%s, '%s');\\n\" % (result,", "file_string = ''.join([str(t) for t in items if t.tag == 'File']) if(file_string): content", "os.F_OK)): content = tags.Tag('Error')(number=FLD_EXISTS) else: try: os.mkdir(new_path) content = tags.Tag('Error')(number=SUCCESS) except: content =", "form_data = req.data[form_name] if(self.name in form_data): setattr(storable, self.get_column_name(), form_data[self.name]['value'].value) return True class FCKEditorResource(resource.CheetahTemplateResource):", "C{self.selected_root['root_callback']} @type folder_path: str \"\"\" result, filename = self.handle_upload(req, folder_path) file_url = self.selected_root['url_callback'](req,", "root_key = req.postpath[0] else: root_key = '__default__' self.selected_root = self.allowed_roots[root_key] if not(req.user.is_allowed(self.selected_root['perms'])): app.raise403()", "bool \"\"\" items = [] directory_path = os.path.join(self.get_selected_root(req), folder_path) for item in os.listdir(directory_path):", "True class FCKEditorResource(resource.CheetahTemplateResource): \"\"\" Provides server-side support for FCKEditor. 
This resource implements the", "= os.path.join(directory_path, item) finfo = os.stat(full_path) if(stat.S_ISREG(finfo.st_mode)): items.append(tags.Tag('File')(name=item, size=(finfo.st_size // 1024))) else: items.append(tags.Tag('Folder')(name=item))", "most cases, the content to be returned, although it will be None when", "def get_search_value(self, value, req, frm): \"\"\" @see: L{modu.editable.define.definition.get_search_value()} \"\"\" value = value.value if(value", "new_folder_name): \"\"\" Used by browser code to support new folder creation. @param folder_path:", "frm['value'](type=\"hidden\", value=default_value, suffix=suffix, attributes=dict(id='%s-value-field' % self.name)) return frm def update_storable(self, req, frm, storable):", "the FCK rich text editor. \"\"\" implements(editable.IDatatype) def get_element(self, req, style, storable): \"\"\"", "template = self.get_template(req) if(template is None): app.raise500(\"No template or content available.\") return admin_resource.select_template_root(req,", "not in config): config['perms'] = self.allowed_roots['__default__']['perms'] if('url_callback' not in config): config['url_callback'] = self.allowed_roots['__default__']['url_callback']", "req): \"\"\" Provides support for the FCK quick upload feature. @param req: The", "browser code to support directory listing. 
@param folder_path: The current folder, relative to", "filename) self.content = [str(tags.script(type=\"text/javascript\")[ \"window.parent.OnUploadCompleted(%s, '%s', '%s', '');\\n\" % (result, file_url, filename) ])]", "filename = fileitem.filename destination_path = os.path.join(self.get_selected_root(req), folder_path, filename) if(os.access(destination_path, os.F_OK)): parts = filename.split('.')", "self.name, onclick=\"getFile('%s')\" % self.name) req.content.report('header', tags.script(type=\"text/javascript\")[[ \"function getFile(elementName){\\n\", \" window.SetUrl = function(value){\\n\", \"", "\"\"\" return self.template def get_template_root(self, req, template=None): \"\"\" @see: L{modu.web.resource.ITemplate.get_template()} \"\"\" if(template is", "\"\"\" @see: L{modu.web.resource.ITemplate.get_template()} \"\"\" return self.template def get_template_root(self, req, template=None): \"\"\" @see: L{modu.web.resource.ITemplate.get_template()}", "The current folder, relative to C{self.selected_root['root_callback']} @type folder_path: str @param new_folder_name: The name", "class FCKEditorResource(resource.CheetahTemplateResource): \"\"\" Provides server-side support for FCKEditor. 
This resource implements the server-side", "\"%s.Create();\\n\" % fck_var ]] frm(type=\"markup\", value=output) return frm def get_search_value(self, value, req, frm):", "self.template = None if(req.postpath and req.postpath[0] == 'fckconfig-custom.js'): self.prepare_config_request(req) return if(req.postpath): root_key =", "default_value = getattr(storable, self.get_column_name(), '') frm = form.FormNode(self.name) if(style == 'listing' or self.get('read_only',", "tags.script(type=\"text/javascript\")[[ \"var %s = new FCKeditor('%s');\\n\" % (fck_var, fck_element_name), \"%s.Config['CustomConfigurationsPath'] = \\\"%s\\\";\\n\" %", "self.file_upload(req, folder_path) return else: return output = '<?xml version=\"1.0\" encoding=\"utf-8\" ?>' output +=", "req: L{modu.web.app.Request} \"\"\" result, filename = self.handle_upload(req, self.get_selected_root(req)) file_url = self.selected_root['url_callback'](req, filename) self.content", "L{modu.web.app.Request} \"\"\" data = req.data if(req['REQUEST_METHOD'] == 'POST'): get_data = form.parse_query_string(req) else: get_data", "r'\\\"') fck_var = 'fck_%s' % self.name output = tags.script(type=\"text/javascript\")[[ \"var %s = new", "import implements from modu import editable, assets, web from modu.persist import sql from", "paths accessed. 
@type content_type: str @ivar content: In most cases, the content to", "]]) frm['label'](type=\"textfield\", value=default_value, attributes=dict(id='%s-value-label' % self.name, disabled=\"1\")) frm['value'](type=\"hidden\", value=default_value, suffix=suffix, attributes=dict(id='%s-value-field' % self.name))", "version=\"1.0\" encoding=\"utf-8\" ?>' output += tags.Tag('Connector')(command=command_name, resourceType=resource_type)[str(content)] self.content_type = 'text/xml' self.content = [output]", "= 'text/html' self.content = [str(tags.script(type=\"text/javascript\")[ \"window.parent.frames['frmUpload'].OnUploadCompleted(%s, '%s');\\n\" % (result, filename) ])] def handle_upload(self,", "if(req.postpath and req.postpath[0] == 'fckconfig-custom.js'): self.prepare_config_request(req) return if(req.postpath): root_key = req.postpath[0] else: root_key", "output = tags.script(type=\"text/javascript\")[[ \"var %s = new FCKeditor('%s');\\n\" % (fck_var, fck_element_name), \"%s.Config['CustomConfigurationsPath'] =", "folder_path = folder_path[1:] folder_url = self.selected_root['url_callback'](req, folder_path) content = tags.Tag('CurrentFolder')(path=folder_path, url=folder_url) if(command_name ==", "new_folder_name = get_data.get('NewFolderName').value folder_path = get_data.get('CurrentFolder').value if(folder_path is None): folder_path = '' elif(folder_path.startswith('/')):", "req: The current request @type req: L{modu.web.app.Request} \"\"\" result, filename = self.handle_upload(req, self.get_selected_root(req))", "The folder to save to, relative to C{self.selected_root['root_callback']} @type folder_path: str \"\"\" result", "'%s-form' % storable.get_table() if(form_name in req.data): form_data = req.data[form_name] if(self.name in form_data): setattr(storable,", "folders_only): file_string = ''.join([str(t) for t in items if t.tag == 'File']) if(file_string):", "+ '-value-field');\\n\", \" e.val(value);\\n\", \" e = $('#' + elementName + 
'-value-label');\\n\", \"", "folder creation. @param folder_path: The current folder, relative to C{self.selected_root['root_callback']} @type folder_path: str", "\\\"%s/\\\";\\n\" % (fck_var, fck_base_path), \"%s.Value = \\\"%s\\\";\\n\" % (fck_var, fck_value), \"%s.Width = \\\"%s\\\";\\n\"", "fileitem = data['NewFile'] filename = fileitem.filename destination_path = os.path.join(self.get_selected_root(req), folder_path, filename) if(os.access(destination_path, os.F_OK)):", "in options.get('allowed_roots', {}).items(): if('perms' not in config): config['perms'] = self.allowed_roots['__default__']['perms'] if('url_callback' not in", "os.path, time, stat, shutil, array from zope.interface import implements from modu import editable,", "The current request @type req: L{modu.web.app.Request} \"\"\" data = req.data if(req['REQUEST_METHOD'] == 'POST'):", "return frm if(self.get('read_only', False)): frm(type='label', value=getattr(storable, self.get_column_name(), '')) return frm fck_base_path = req.get_path('assets',", "attributes=dict(id='%s-value-label' % self.name, disabled=\"1\")) frm['value'](type=\"hidden\", value=default_value, suffix=suffix, attributes=dict(id='%s-value-field' % self.name)) return frm def", "return self.selected_root['root'] def prepare_quick_upload(self, req): \"\"\" Provides support for the FCK quick upload", "storable): form_name = '%s-form' % storable.get_table() if(form_name in req.data): form_data = req.data[form_name] if(self.name", "folder_path, new_folder_name): \"\"\" Used by browser code to support new folder creation. 
@param", "fck_custom_config), \"%s.BasePath = \\\"%s/\\\";\\n\" % (fck_var, fck_base_path), \"%s.Value = \\\"%s\\\";\\n\" % (fck_var, fck_value),", "self.get('height', 400)), \"%s.ToolbarSet = \\\"%s\\\";\\n\" % (fck_var, self.get('toolbar_set', 'Standard')), \"%s.Create();\\n\" % fck_var ]]", "save to, relative to C{self.selected_root['root_callback']} @type folder_path: str \"\"\" result = UL_ACCESS_DENIED data", "destination_path = os.path.join(self.get_selected_root(req), folder_path, filename) if(os.access(destination_path, os.F_OK)): parts = filename.split('.') if(len(parts) > 1):", "class FCKFileField(define.definition): \"\"\" Select a file from a given directory and save its", "= form.FormNode(self.name) if(style == 'listing'): frm(type='label', value='(html content)') return frm if(self.get('read_only', False)): frm(type='label',", "== 'GetFoldersAndFiles'): content += self.get_directory_items(req, folder_path, False) elif(command_name == 'CreateFolder'): content += self.create_folder(req,", "and saves to the given folder. 
@param req: The current request @type req:", "fck_value = fck_value.tostring() else: fck_value = str(fck_value) fck_value = fck_value.replace(\"\\r\\n\", r'\\r\\n') fck_value =", "filemgr_path = req.get_path('assets/fckeditor/editor/filemanager/browser/default/browser.html') assets.activate_jquery(req) suffix = tags.input(type=\"button\", value=\"Select...\", id='%s-select-button' % self.name, onclick=\"getFile('%s')\" %", "= '%s-form[%s]' % (storable.get_table(), self.name) # //$value = str_replace(\"'\", '&apos;', $value); fck_value =", "return self.content return super(FCKEditorResource, self).get_content(req) def get_template(self, req): \"\"\" @see: L{modu.web.resource.ITemplate.get_template()} \"\"\" return", "fck_value = str(fck_value) fck_value = fck_value.replace(\"\\r\\n\", r'\\r\\n') fck_value = fck_value.replace(\"\\n\", r'\\n') fck_value =", "% (fck_var, self.get('width', 600)), \"%s.Height = \\\"%s\\\";\\n\" % (fck_var, self.get('height', 400)), \"%s.ToolbarSet =", "\" var e = $('#' + elementName + '-value-field');\\n\", \" e.val(value);\\n\", \" e", "new_folder_name) if(os.access(new_path, os.F_OK)): content = tags.Tag('Error')(number=FLD_EXISTS) else: try: os.mkdir(new_path) content = tags.Tag('Error')(number=SUCCESS) except:", "by browser code to support directory listing. @param folder_path: The current folder, relative", "file_upload(self, req, folder_path): \"\"\" Provides support for file uploads within the server-side browser", "C{self.selected_root['root_callback']} @type folder_path: str \"\"\" result = UL_ACCESS_DENIED data = req.data fileitem =", "folder_path, False) elif(command_name == 'CreateFolder'): content += self.create_folder(req, folder_path, new_folder_name) elif(command_name == 'FileUpload'):", "for the file upload directory. 
@type selected_root: dict @ivar content_type: The content type", "'' elif(folder_path.startswith('/')): folder_path = folder_path[1:] folder_url = self.selected_root['url_callback'](req, folder_path) content = tags.Tag('CurrentFolder')(path=folder_path, url=folder_url)", "+= self.create_folder(req, folder_path, new_folder_name) elif(command_name == 'FileUpload'): self.file_upload(req, folder_path) return else: return output", "= SUCCESS except: import traceback print traceback.print_exc() result = UL_ACCESS_DENIED return result, filename", "folder_path, filename) if(os.access(destination_path, os.F_OK)): parts = filename.split('.') if(len(parts) > 1): parts[len(parts) - 2]", "get_element(self, req, style, storable): \"\"\" @see: L{modu.editable.define.definition.get_element()} \"\"\" frm = form.FormNode(self.name) if(style ==", "new_folder_name: str \"\"\" directory_path = os.path.join(self.get_selected_root(req), folder_path) #prevent shenanigans new_folder_name = new_folder_name.split('/').pop() new_path", "content = tags.Tag('Error')(number=FLD_UNKNOWN_ERROR) return content def file_upload(self, req, folder_path): \"\"\" Provides support for", "request and saves to the given folder. @param req: The current request @type", "\"\"\" self.content_type = 'text/html' self.content = None self.template = None if(req.postpath and req.postpath[0]", "get_data = data command_name = get_data.get('Command').value resource_type = get_data.get('Type').value new_folder_name = get_data.get('NewFolderName').value folder_path", "req.get_path(*path), ), } for key, config in options.get('allowed_roots', {}).items(): if('perms' not in config):", "content = tags.Tag('Error')(number=SUCCESS) except: content = tags.Tag('Error')(number=FLD_UNKNOWN_ERROR) return content def file_upload(self, req, folder_path):", "support for file uploads within the server-side browser window. 
@param req: The current", "stat, shutil, array from zope.interface import implements from modu import editable, assets, web", "url_callback = lambda req, *path: req.get_path(*path), ), } for key, config in options.get('allowed_roots',", "server-side file browser. @param req: The current request @type req: L{modu.web.app.Request} \"\"\" data", "\" e.val(value);\\n\", \" };\\n\", \" var filemanager = '%s';\\n\" % filemgr_path, \" var", "zope.interface import implements from modu import editable, assets, web from modu.persist import sql", "modu.web import resource, app SUCCESS = 0 CUSTOM_ERROR = 1 UL_RENAME = 201", "@type req: L{modu.web.app.Request} @param folder_path: The current folder, relative to C{self.selected_root['root_callback']} @type folder_path:", "\" win.focus();\\n\", \"}\\n\", ]]) frm['label'](type=\"textfield\", value=default_value, attributes=dict(id='%s-value-label' % self.name, disabled=\"1\")) frm['value'](type=\"hidden\", value=default_value, suffix=suffix,", "import resource, app SUCCESS = 0 CUSTOM_ERROR = 1 UL_RENAME = 201 UL_INVALID_TYPE", "return sql.RAW(sql.interp(\"MATCH(%%s) AGAINST (%s)\", [value])) else: return sql.RAW(sql.interp(\"INSTR(%%s, %s)\", [value])) class FCKFileField(define.definition): \"\"\"", "uploads within the server-side browser window. 
@param req: The current request @type req:", "or self.get('read_only', False)): if not(default_value): default_value = '(none)' frm(type='label', value=default_value) return frm filemgr_path", "self.get_column_name(), '') if(fck_value is None): fck_value = '' if(isinstance(fck_value, array.array)): fck_value = fck_value.tostring()", "new FCKeditor('%s');\\n\" % (fck_var, fck_element_name), \"%s.Config['CustomConfigurationsPath'] = \\\"%s\\\";\\n\" % (fck_var, fck_custom_config), \"%s.BasePath =", "1024))) else: items.append(tags.Tag('Folder')(name=item)) items.sort(lambda a, b: cmp(a.attributes['name'].lower(), b.attributes['name'].lower())) content = tags.Tag('Folders')[''.join([str(t) for t", "file_url = self.selected_root['url_callback'](req, folder_path, filename) self.content_type = 'text/html' self.content = [str(tags.script(type=\"text/javascript\")[ \"window.parent.frames['frmUpload'].OnUploadCompleted(%s, '%s');\\n\"", "@param req: The current request @type req: L{modu.web.app.Request} \"\"\" result, filename = self.handle_upload(req,", "folder_path: str @param folders_only: If True, only list folders @type req: bool \"\"\"", "= os.path.join(directory_path, new_folder_name) if(os.access(new_path, os.F_OK)): content = tags.Tag('Error')(number=FLD_EXISTS) else: try: os.mkdir(new_path) content =", "self.content return super(FCKEditorResource, self).get_content(req) def get_template(self, req): \"\"\" @see: L{modu.web.resource.ITemplate.get_template()} \"\"\" return self.template", "= None if(req.postpath and req.postpath[0] == 'fckconfig-custom.js'): self.prepare_config_request(req) return if(req.postpath): root_key = req.postpath[0]", "folder_path) file_url = self.selected_root['url_callback'](req, folder_path, filename) self.content_type = 'text/html' self.content = [str(tags.script(type=\"text/javascript\")[ \"window.parent.frames['frmUpload'].OnUploadCompleted(%s,", "= req.data[form_name] if(self.name in form_data): setattr(storable, 
self.get_column_name(), form_data[self.name]['value'].value) return True class FCKEditorResource(resource.CheetahTemplateResource): \"\"\"", "])] def prepare_browser(self, req): \"\"\" Provides support for the FCK server-side file browser.", "sql.RAW(sql.interp(\"MATCH(%%s) AGAINST (%s)\", [value])) else: return sql.RAW(sql.interp(\"INSTR(%%s, %s)\", [value])) class FCKFileField(define.definition): \"\"\" Select", "e = $('#' + elementName + '-value-label');\\n\", \" e.val(value);\\n\", \" };\\n\", \" var", "filemgr_path, \" var connector = '%s';\\n\" % self.get('fck_root', '/fck'), \" var win =", "//$value = str_replace(\"'\", '&apos;', $value); fck_value = getattr(storable, self.get_column_name(), '') if(fck_value is None):", "} for key, config in options.get('allowed_roots', {}).items(): if('perms' not in config): config['perms'] =", "201 UL_INVALID_TYPE = 202 UL_ACCESS_DENIED = 203 FLD_EXISTS = 101 FLD_INVALID_NAME = 102", "elementName + '-value-label');\\n\", \" e.val(value);\\n\", \" };\\n\", \" var filemanager = '%s';\\n\" %", "folder_path) return else: return output = '<?xml version=\"1.0\" encoding=\"utf-8\" ?>' output += tags.Tag('Connector')(command=command_name,", "a Cheetah template to serve up the per-site FCK configuration file. 
@param req:", "frm(type='label', value='(html content)') return frm if(self.get('read_only', False)): frm(type='label', value=getattr(storable, self.get_column_name(), '')) return frm", "storable): default_value = getattr(storable, self.get_column_name(), '') frm = form.FormNode(self.name) if(style == 'listing' or", "def prepare_config_request(self, req): \"\"\" Uses a Cheetah template to serve up the per-site", "fck_var ]] frm(type=\"markup\", value=output) return frm def get_search_value(self, value, req, frm): \"\"\" @see:", "folder, relative to C{self.selected_root['root_callback']} @type folder_path: str @param folders_only: If True, only list", "SUCCESS = 0 CUSTOM_ERROR = 1 UL_RENAME = 201 UL_INVALID_TYPE = 202 UL_ACCESS_DENIED", "if(file_string): content += tags.Tag('Files')[file_string] return content def create_folder(self, req, folder_path, new_folder_name): \"\"\" Used", "only list folders @type req: bool \"\"\" items = [] directory_path = os.path.join(self.get_selected_root(req),", "\\\"%s\\\";\\n\" % (fck_var, self.get('toolbar_set', 'Standard')), \"%s.Create();\\n\" % fck_var ]] frm(type=\"markup\", value=output) return frm", "\"\"\" implements(editable.IDatatype) def get_element(self, req, style, storable): default_value = getattr(storable, self.get_column_name(), '') frm", "@param new_folder_name: The name of the folder to create @type new_folder_name: str \"\"\"", "current request @type req: L{modu.web.app.Request} @param folder_path: The folder to save to, relative", "# # # See LICENSE for details \"\"\" Contains the FCK Editor support", "text editor. \"\"\" implements(editable.IDatatype) def get_element(self, req, style, storable): \"\"\" @see: L{modu.editable.define.definition.get_element()} \"\"\"", "Storable. 
\"\"\" implements(editable.IDatatype) def get_element(self, req, style, storable): default_value = getattr(storable, self.get_column_name(), '')", "storable): \"\"\" @see: L{modu.editable.define.definition.get_element()} \"\"\" frm = form.FormNode(self.name) if(style == 'listing'): frm(type='label', value='(html", "fck_value = '' if(isinstance(fck_value, array.array)): fck_value = fck_value.tostring() else: fck_value = str(fck_value) fck_value", "return content def file_upload(self, req, folder_path): \"\"\" Provides support for file uploads within", "fck_element_name = '%s-form[%s]' % (storable.get_table(), self.name) # //$value = str_replace(\"'\", '&apos;', $value); fck_value", "'text/javascript' self.template = 'fckconfig-custom.js.tmpl' def get_directory_items(self, req, folder_path, folders_only): \"\"\" Used by browser", "for the FCK server-side file browser. @param req: The current request @type req:", "using the template to generate the FCK config file. @type content: str \"\"\"", "filename = self.handle_upload(req, self.get_selected_root(req)) file_url = self.selected_root['url_callback'](req, filename) self.content = [str(tags.script(type=\"text/javascript\")[ \"window.parent.OnUploadCompleted(%s, '%s',", "req, frm): \"\"\" @see: L{modu.editable.define.definition.get_search_value()} \"\"\" value = value.value if(value is ''): return", "filename.split('.') if(len(parts) > 1): parts[len(parts) - 2] += '-%d' % int(time.time()) filename =", "return True class FCKEditorResource(resource.CheetahTemplateResource): \"\"\" Provides server-side support for FCKEditor. 
This resource implements", "value=default_value, attributes=dict(id='%s-value-label' % self.name, disabled=\"1\")) frm['value'](type=\"hidden\", value=default_value, suffix=suffix, attributes=dict(id='%s-value-field' % self.name)) return frm", "os.stat(full_path) if(stat.S_ISREG(finfo.st_mode)): items.append(tags.Tag('File')(name=item, size=(finfo.st_size // 1024))) else: items.append(tags.Tag('Folder')(name=item)) items.sort(lambda a, b: cmp(a.attributes['name'].lower(), b.attributes['name'].lower()))", "= { '__default__' : dict( perms = 'access admin', root_callback = lambda req:", "\"\"\" Used by browser code to support directory listing. @param folder_path: The current", "frm, storable): form_name = '%s-form' % storable.get_table() if(form_name in req.data): form_data = req.data[form_name]", "getattr(storable, self.get_column_name(), '') frm = form.FormNode(self.name) if(style == 'listing' or self.get('read_only', False)): if", "%s)\", [value])) class FCKFileField(define.definition): \"\"\" Select a file from a given directory and", "return frm def update_storable(self, req, frm, storable): form_name = '%s-form' % storable.get_table() if(form_name", "new_folder_name.split('/').pop() new_path = os.path.join(directory_path, new_folder_name) if(os.access(new_path, os.F_OK)): content = tags.Tag('Error')(number=FLD_EXISTS) else: try: os.mkdir(new_path)", "it will be None when using the template to generate the FCK config", "\"\"\" Contains the FCK Editor support for modu.editable. \"\"\" import os, os.path, time,", "Contains the FCK Editor support for modu.editable. 
\"\"\" import os, os.path, time, stat,", "elif(command_name == 'CreateFolder'): content += self.create_folder(req, folder_path, new_folder_name) elif(command_name == 'FileUpload'): self.file_upload(req, folder_path)", "fck_custom_config = req.get_path(self.get('fck_root', '/fck'), 'fckconfig-custom.js') fck_element_name = '%s-form[%s]' % (storable.get_table(), self.name) # //$value", "support for FCKEditor. This resource implements the server-side portions of FCKEditor, namely the", "request @type req: L{modu.web.app.Request} \"\"\" self.content_type = 'text/javascript' self.template = 'fckconfig-custom.js.tmpl' def get_directory_items(self,", "req.content.report('header', tags.script(type=\"text/javascript\")[[ \"function getFile(elementName){\\n\", \" window.SetUrl = function(value){\\n\", \" var e = $('#'", "FLD_ACCESS_DENIED = 103 FLD_UNKNOWN_ERROR = 110 class FCKEditorField(define.definition): \"\"\" A field type that", "fck_var = 'fck_%s' % self.name output = tags.script(type=\"text/javascript\")[[ \"var %s = new FCKeditor('%s');\\n\"", "from modu.util import form, tags from modu.web import resource, app SUCCESS = 0", "= $('#' + elementName + '-value-label');\\n\", \" e.val(value);\\n\", \" };\\n\", \" var filemanager", "== 'fckconfig-custom.js'): self.prepare_config_request(req) return if(req.postpath): root_key = req.postpath[0] else: root_key = '__default__' self.selected_root", "the template to generate the FCK config file. 
@type content: str \"\"\" def", "str @param new_folder_name: The name of the folder to create @type new_folder_name: str", "value=default_value, suffix=suffix, attributes=dict(id='%s-value-field' % self.name)) return frm def update_storable(self, req, frm, storable): form_name", "= 203 FLD_EXISTS = 101 FLD_INVALID_NAME = 102 FLD_ACCESS_DENIED = 103 FLD_UNKNOWN_ERROR =", "= fck_value.replace(\"\\n\", r'\\n') fck_value = fck_value.replace(\"\\r\", r'\\r') fck_value = fck_value.replace('\"', r'\\\"') fck_var =", "req.postpath[0] == 'upload'): self.prepare_quick_upload(req) else: self.prepare_browser(req) def get_content_type(self, req): \"\"\" @see: L{modu.web.resource.IContent.get_content_type()} \"\"\"", "content type to be returned by this resource, which changes depending on the", "+= self.get_directory_items(req, folder_path, False) elif(command_name == 'CreateFolder'): content += self.create_folder(req, folder_path, new_folder_name) elif(command_name", "== 'FileUpload'): self.file_upload(req, folder_path) return else: return output = '<?xml version=\"1.0\" encoding=\"utf-8\" ?>'", "def file_upload(self, req, folder_path): \"\"\" Provides support for file uploads within the server-side", "req): if('root_callback' in self.selected_root): return self.selected_root['root_callback'](req) return self.selected_root['root'] def prepare_quick_upload(self, req): \"\"\" Provides", "admin', root_callback = lambda req: os.path.join(req.approot, req.app.webroot), url_callback = lambda req, *path: req.get_path(*path),", "self.allowed_roots = { '__default__' : dict( perms = 'access admin', root_callback = lambda", "@param req: The current request @type req: L{modu.web.app.Request} @param folder_path: The current folder,", "the request and saves to the given folder. 
@param req: The current request", "req: L{modu.web.app.Request} @param folder_path: The folder to save to, relative to C{self.selected_root['root_callback']} @type", "'w') bytes = fileitem.file.read(65536) while(bytes): uploaded_file.write(bytes) bytes = fileitem.file.read(65536) uploaded_file.close() result = SUCCESS", "= str(fck_value) fck_value = fck_value.replace(\"\\r\\n\", r'\\r\\n') fck_value = fck_value.replace(\"\\n\", r'\\n') fck_value = fck_value.replace(\"\\r\",", "to the given folder. @param req: The current request @type req: L{modu.web.app.Request} @param", "\"%s.Value = \\\"%s\\\";\\n\" % (fck_var, fck_value), \"%s.Width = \\\"%s\\\";\\n\" % (fck_var, self.get('width', 600)),", "if('perms' not in config): config['perms'] = self.allowed_roots['__default__']['perms'] if('url_callback' not in config): config['url_callback'] =", "sql from modu.editable import define from modu.editable import resource as admin_resource from modu.util", "relative to C{self.selected_root['root_callback']} @type folder_path: str @param folders_only: If True, only list folders", "fileitem.filename destination_path = os.path.join(self.get_selected_root(req), folder_path, filename) if(os.access(destination_path, os.F_OK)): parts = filename.split('.') if(len(parts) >", "if(len(parts) > 1): parts[len(parts) - 2] += '-%d' % int(time.time()) filename = '.'.join(parts)", "'' if(isinstance(fck_value, array.array)): fck_value = fck_value.tostring() else: fck_value = str(fck_value) fck_value = fck_value.replace(\"\\r\\n\",", "if(self.get('fulltext_search')): return sql.RAW(sql.interp(\"MATCH(%%s) AGAINST (%s)\", [value])) else: return sql.RAW(sql.interp(\"INSTR(%%s, %s)\", [value])) class FCKFileField(define.definition):", "if('url_callback' not in config): config['url_callback'] = self.allowed_roots['__default__']['url_callback'] self.allowed_roots[key] = config def prepare_content(self, req):", "win.focus();\\n\", \"}\\n\", ]]) frm['label'](type=\"textfield\", value=default_value, 
attributes=dict(id='%s-value-label' % self.name, disabled=\"1\")) frm['value'](type=\"hidden\", value=default_value, suffix=suffix, attributes=dict(id='%s-value-field'", "UL_ACCESS_DENIED data = req.data fileitem = data['NewFile'] filename = fileitem.filename destination_path = os.path.join(self.get_selected_root(req),", "UL_RENAME = 201 UL_INVALID_TYPE = 202 UL_ACCESS_DENIED = 203 FLD_EXISTS = 101 FLD_INVALID_NAME", "getFile(elementName){\\n\", \" window.SetUrl = function(value){\\n\", \" var e = $('#' + elementName +", "FCK configuration file. @param req: The current request @type req: L{modu.web.app.Request} \"\"\" self.content_type", "folder_url = self.selected_root['url_callback'](req, folder_path) content = tags.Tag('CurrentFolder')(path=folder_path, url=folder_url) if(command_name == 'GetFolders'): content +=", "while(bytes): uploaded_file.write(bytes) bytes = fileitem.file.read(65536) uploaded_file.close() result = SUCCESS except: import traceback print", "= req.data if(req['REQUEST_METHOD'] == 'POST'): get_data = form.parse_query_string(req) else: get_data = data command_name", "req: The current request @type req: L{modu.web.app.Request} @param folder_path: The folder to save", "path to the Storable. \"\"\" implements(editable.IDatatype) def get_element(self, req, style, storable): default_value =", "self.template = 'fckconfig-custom.js.tmpl' def get_directory_items(self, req, folder_path, folders_only): \"\"\" Used by browser code", "= fck_value.replace(\"\\r\\n\", r'\\r\\n') fck_value = fck_value.replace(\"\\n\", r'\\n') fck_value = fck_value.replace(\"\\r\", r'\\r') fck_value =", "== 'File']) if(file_string): content += tags.Tag('Files')[file_string] return content def create_folder(self, req, folder_path, new_folder_name):", "by browser code to support new folder creation. 
@param folder_path: The current folder,", "'fckconfig-custom.js.tmpl' def get_directory_items(self, req, folder_path, folders_only): \"\"\" Used by browser code to support", "item) finfo = os.stat(full_path) if(stat.S_ISREG(finfo.st_mode)): items.append(tags.Tag('File')(name=item, size=(finfo.st_size // 1024))) else: items.append(tags.Tag('Folder')(name=item)) items.sort(lambda a,", "'-%d' % int(time.time()) filename = '.'.join(parts) result = UL_RENAME else: result = UL_INVALID_TYPE", "encoding=\"utf-8\" ?>' output += tags.Tag('Connector')(command=command_name, resourceType=resource_type)[str(content)] self.content_type = 'text/xml' self.content = [output] def", "folder_path, new_folder_name) elif(command_name == 'FileUpload'): self.file_upload(req, folder_path) return else: return output = '<?xml", "if(style == 'listing'): frm(type='label', value='(html content)') return frm if(self.get('read_only', False)): frm(type='label', value=getattr(storable, self.get_column_name(),", "+= tags.Tag('Connector')(command=command_name, resourceType=resource_type)[str(content)] self.content_type = 'text/xml' self.content = [output] def prepare_config_request(self, req): \"\"\"", "= value.value if(value is ''): return None if(self.get('fulltext_search')): return sql.RAW(sql.interp(\"MATCH(%%s) AGAINST (%s)\", [value]))", "% (storable.get_table(), self.name) # //$value = str_replace(\"'\", '&apos;', $value); fck_value = getattr(storable, self.get_column_name(),", "'%s-form[%s]' % (storable.get_table(), self.name) # //$value = str_replace(\"'\", '&apos;', $value); fck_value = getattr(storable,", "attributes=dict(id='%s-value-field' % self.name)) return frm def update_storable(self, req, frm, storable): form_name = '%s-form'", "req, frm, storable): form_name = '%s-form' % storable.get_table() if(form_name in req.data): form_data =", "C{self.selected_root['root_callback']} @type folder_path: str @param folders_only: If True, only list folders @type req:", "data command_name = 
get_data.get('Command').value resource_type = get_data.get('Type').value new_folder_name = get_data.get('NewFolderName').value folder_path = get_data.get('CurrentFolder').value", "window.SetUrl = function(value){\\n\", \" var e = $('#' + elementName + '-value-field');\\n\", \"", "\"\"\" Used by browser code to support new folder creation. @param folder_path: The", "for t in items if t.tag == 'File']) if(file_string): content += tags.Tag('Files')[file_string] return", "for the FCK quick upload feature. @param req: The current request @type req:", "\"}\\n\", ]]) frm['label'](type=\"textfield\", value=default_value, attributes=dict(id='%s-value-label' % self.name, disabled=\"1\")) frm['value'](type=\"hidden\", value=default_value, suffix=suffix, attributes=dict(id='%s-value-field' %", "+= tags.Tag('Files')[file_string] return content def create_folder(self, req, folder_path, new_folder_name): \"\"\" Used by browser", "resource, which changes depending on the particular paths accessed. @type content_type: str @ivar", "frm = form.FormNode(self.name) if(style == 'listing'): frm(type='label', value='(html content)') return frm if(self.get('read_only', False)):", "file. 
@param req: The current request @type req: L{modu.web.app.Request} \"\"\" self.content_type = 'text/javascript'", "110 class FCKEditorField(define.definition): \"\"\" A field type that displays the FCK rich text", "items.append(tags.Tag('File')(name=item, size=(finfo.st_size // 1024))) else: items.append(tags.Tag('Folder')(name=item)) items.sort(lambda a, b: cmp(a.attributes['name'].lower(), b.attributes['name'].lower())) content =", "uploaded_file.write(bytes) bytes = fileitem.file.read(65536) uploaded_file.close() result = SUCCESS except: import traceback print traceback.print_exc()", "@param folder_path: The current folder, relative to C{self.selected_root['root_callback']} @type folder_path: str \"\"\" result,", "self.get_column_name(), '') frm = form.FormNode(self.name) if(style == 'listing' or self.get('read_only', False)): if not(default_value):", "and req.postpath[0] == 'upload'): self.prepare_quick_upload(req) else: self.prepare_browser(req) def get_content_type(self, req): \"\"\" @see: L{modu.web.resource.IContent.get_content_type()}", "onclick=\"getFile('%s')\" % self.name) req.content.report('header', tags.script(type=\"text/javascript\")[[ \"function getFile(elementName){\\n\", \" window.SetUrl = function(value){\\n\", \" var", "req.content.report('header', tags.script(type=\"text/javascript\", src=req.get_path('assets', 'fckeditor', 'fckeditor.js'))['']) fck_custom_config = req.get_path(self.get('fck_root', '/fck'), 'fckconfig-custom.js') fck_element_name = '%s-form[%s]'", "The current folder, relative to C{self.selected_root['root_callback']} @type folder_path: str @param folders_only: If True,", "namely the image/file upload and server-side file browser. 
@ivar selected_root: Details for the", "\"%s.BasePath = \\\"%s/\\\";\\n\" % (fck_var, fck_base_path), \"%s.Value = \\\"%s\\\";\\n\" % (fck_var, fck_value), \"%s.Width", "fck_value = fck_value.replace(\"\\n\", r'\\n') fck_value = fck_value.replace(\"\\r\", r'\\r') fck_value = fck_value.replace('\"', r'\\\"') fck_var", "@param folder_path: The folder to save to, relative to C{self.selected_root['root_callback']} @type folder_path: str", "@param req: The current request @type req: L{modu.web.app.Request} \"\"\" self.content_type = 'text/javascript' self.template", "// 1024))) else: items.append(tags.Tag('Folder')(name=item)) items.sort(lambda a, b: cmp(a.attributes['name'].lower(), b.attributes['name'].lower())) content = tags.Tag('Folders')[''.join([str(t) for", "req.data fileitem = data['NewFile'] filename = fileitem.filename destination_path = os.path.join(self.get_selected_root(req), folder_path, filename) if(os.access(destination_path,", "L{modu.editable.define.definition.get_element()} \"\"\" frm = form.FormNode(self.name) if(style == 'listing'): frm(type='label', value='(html content)') return frm", "frm(type=\"markup\", value=output) return frm def get_search_value(self, value, req, frm): \"\"\" @see: L{modu.editable.define.definition.get_search_value()} \"\"\"", "on the particular paths accessed. @type content_type: str @ivar content: In most cases,", "from modu.editable import resource as admin_resource from modu.util import form, tags from modu.web", "upload data out of the request and saves to the given folder. 
@param", "= new_folder_name.split('/').pop() new_path = os.path.join(directory_path, new_folder_name) if(os.access(new_path, os.F_OK)): content = tags.Tag('Error')(number=FLD_EXISTS) else: try:", "'__default__' self.selected_root = self.allowed_roots[root_key] if not(req.user.is_allowed(self.selected_root['perms'])): app.raise403() if(req.postpath and req.postpath[0] == 'upload'): self.prepare_quick_upload(req)", "frm(type='label', value=default_value) return frm filemgr_path = req.get_path('assets/fckeditor/editor/filemanager/browser/default/browser.html') assets.activate_jquery(req) suffix = tags.input(type=\"button\", value=\"Select...\", id='%s-select-button'", "> 1): parts[len(parts) - 2] += '-%d' % int(time.time()) filename = '.'.join(parts) result", "+= '-%d' % int(time.time()) filename = '.'.join(parts) result = UL_RENAME else: result =", "'listing'): frm(type='label', value='(html content)') return frm if(self.get('read_only', False)): frm(type='label', value=getattr(storable, self.get_column_name(), '')) return", "FLD_EXISTS = 101 FLD_INVALID_NAME = 102 FLD_ACCESS_DENIED = 103 FLD_UNKNOWN_ERROR = 110 class", "modu.editable. \"\"\" import os, os.path, time, stat, shutil, array from zope.interface import implements", "str_replace(\"'\", '&apos;', $value); fck_value = getattr(storable, self.get_column_name(), '') if(fck_value is None): fck_value =", "rich text editor. 
\"\"\" implements(editable.IDatatype) def get_element(self, req, style, storable): \"\"\" @see: L{modu.editable.define.definition.get_element()}", "frm if(self.get('read_only', False)): frm(type='label', value=getattr(storable, self.get_column_name(), '')) return frm fck_base_path = req.get_path('assets', 'fckeditor')", "dict @ivar content_type: The content type to be returned by this resource, which", "app.raise403() if(req.postpath and req.postpath[0] == 'upload'): self.prepare_quick_upload(req) else: self.prepare_browser(req) def get_content_type(self, req): \"\"\"", "\"\"\" return '%s; charset=UTF-8' % self.content_type def get_content(self, req): \"\"\" @see: L{modu.web.resource.IContent.get_content()} \"\"\"", "@type req: bool \"\"\" items = [] directory_path = os.path.join(self.get_selected_root(req), folder_path) for item", "of the folder to create @type new_folder_name: str \"\"\" directory_path = os.path.join(self.get_selected_root(req), folder_path)", "frm def update_storable(self, req, frm, storable): form_name = '%s-form' % storable.get_table() if(form_name in", "browser. @param req: The current request @type req: L{modu.web.app.Request} \"\"\" data = req.data", "if(fck_value is None): fck_value = '' if(isinstance(fck_value, array.array)): fck_value = fck_value.tostring() else: fck_value", "key, config in options.get('allowed_roots', {}).items(): if('perms' not in config): config['perms'] = self.allowed_roots['__default__']['perms'] if('url_callback'", "FCK rich text editor. \"\"\" implements(editable.IDatatype) def get_element(self, req, style, storable): \"\"\" @see:", "def get_template(self, req): \"\"\" @see: L{modu.web.resource.ITemplate.get_template()} \"\"\" return self.template def get_template_root(self, req, template=None):", "return self.selected_root['root_callback'](req) return self.selected_root['root'] def prepare_quick_upload(self, req): \"\"\" Provides support for the FCK", "window. 
@param req: The current request @type req: L{modu.web.app.Request} @param folder_path: The current", "filename) self.content_type = 'text/html' self.content = [str(tags.script(type=\"text/javascript\")[ \"window.parent.frames['frmUpload'].OnUploadCompleted(%s, '%s');\\n\" % (result, filename) ])]", "image/file upload and server-side file browser. @ivar selected_root: Details for the file upload", "root_callback = lambda req: os.path.join(req.approot, req.app.webroot), url_callback = lambda req, *path: req.get_path(*path), ),", "frm['label'](type=\"textfield\", value=default_value, attributes=dict(id='%s-value-label' % self.name, disabled=\"1\")) frm['value'](type=\"hidden\", value=default_value, suffix=suffix, attributes=dict(id='%s-value-field' % self.name)) return", "displays the FCK rich text editor. \"\"\" implements(editable.IDatatype) def get_element(self, req, style, storable):", "@type selected_root: dict @ivar content_type: The content type to be returned by this", "L{modu.web.app.Request} \"\"\" result, filename = self.handle_upload(req, self.get_selected_root(req)) file_url = self.selected_root['url_callback'](req, filename) self.content =", "The current request @type req: L{modu.web.app.Request} \"\"\" self.content_type = 'text/javascript' self.template = 'fckconfig-custom.js.tmpl'", "parts[len(parts) - 2] += '-%d' % int(time.time()) filename = '.'.join(parts) result = UL_RENAME", "101 FLD_INVALID_NAME = 102 FLD_ACCESS_DENIED = 103 FLD_UNKNOWN_ERROR = 110 class FCKEditorField(define.definition): \"\"\"", "os, os.path, time, stat, shutil, array from zope.interface import implements from modu import", "selected_root: dict @ivar content_type: The content type to be returned by this resource,", "req: L{modu.web.app.Request} \"\"\" self.content_type = 'text/javascript' self.template = 'fckconfig-custom.js.tmpl' def get_directory_items(self, req, folder_path,", "new_folder_name) elif(command_name == 'FileUpload'): self.file_upload(req, folder_path) return 
else: return output = '<?xml version=\"1.0\"", "req, style, storable): \"\"\" @see: L{modu.editable.define.definition.get_element()} \"\"\" frm = form.FormNode(self.name) if(style == 'listing'):", "relative to C{self.selected_root['root_callback']} @type folder_path: str \"\"\" result, filename = self.handle_upload(req, folder_path) file_url", "current request @type req: L{modu.web.app.Request} @param folder_path: The current folder, relative to C{self.selected_root['root_callback']}", "self.selected_root['root'] def prepare_quick_upload(self, req): \"\"\" Provides support for the FCK quick upload feature.", "accessed. @type content_type: str @ivar content: In most cases, the content to be", "config): config['perms'] = self.allowed_roots['__default__']['perms'] if('url_callback' not in config): config['url_callback'] = self.allowed_roots['__default__']['url_callback'] self.allowed_roots[key] =", "the image/file upload and server-side file browser. @ivar selected_root: Details for the file", "details \"\"\" Contains the FCK Editor support for modu.editable. 
\"\"\" import os, os.path,", "if(item.startswith('.')): continue full_path = os.path.join(directory_path, item) finfo = os.stat(full_path) if(stat.S_ISREG(finfo.st_mode)): items.append(tags.Tag('File')(name=item, size=(finfo.st_size //", "UL_INVALID_TYPE = 202 UL_ACCESS_DENIED = 203 FLD_EXISTS = 101 FLD_INVALID_NAME = 102 FLD_ACCESS_DENIED", "600)), \"%s.Height = \\\"%s\\\";\\n\" % (fck_var, self.get('height', 400)), \"%s.ToolbarSet = \\\"%s\\\";\\n\" % (fck_var,", "r'\\r') fck_value = fck_value.replace('\"', r'\\\"') fck_var = 'fck_%s' % self.name output = tags.script(type=\"text/javascript\")[[", "modu.util import form, tags from modu.web import resource, app SUCCESS = 0 CUSTOM_ERROR", "= '%s';\\n\" % self.get('fck_root', '/fck'), \" var win = window.open(filemanager+'?Connector='+connector+'&Type=Image','fileupload','width=600,height=400');\\n\", \" win.focus();\\n\", \"}\\n\",", "(fck_var, fck_value), \"%s.Width = \\\"%s\\\";\\n\" % (fck_var, self.get('width', 600)), \"%s.Height = \\\"%s\\\";\\n\" %", "to generate the FCK config file. @type content: str \"\"\" def __init__(self, **options):", "self.get_template(req) if(template is None): app.raise500(\"No template or content available.\") return admin_resource.select_template_root(req, template) def", "None): folder_path = '' elif(folder_path.startswith('/')): folder_path = folder_path[1:] folder_url = self.selected_root['url_callback'](req, folder_path) content", "folder_path): \"\"\" Provides support for file uploads within the server-side browser window. @param", "= \\\"%s\\\";\\n\" % (fck_var, fck_custom_config), \"%s.BasePath = \\\"%s/\\\";\\n\" % (fck_var, fck_base_path), \"%s.Value =", "and save its path to the Storable. 
\"\"\" implements(editable.IDatatype) def get_element(self, req, style,", "str @ivar content: In most cases, the content to be returned, although it", "handle_upload(self, req, folder_path): \"\"\" Pulls upload data out of the request and saves", "result = UL_RENAME else: result = UL_INVALID_TYPE if(result != UL_INVALID_TYPE): try: uploaded_file =", "request @type req: L{modu.web.app.Request} @param folder_path: The current folder, relative to C{self.selected_root['root_callback']} @type", "'%s', '');\\n\" % (result, file_url, filename) ])] def prepare_browser(self, req): \"\"\" Provides support", "= self.selected_root['url_callback'](req, folder_path, filename) self.content_type = 'text/html' self.content = [str(tags.script(type=\"text/javascript\")[ \"window.parent.frames['frmUpload'].OnUploadCompleted(%s, '%s');\\n\" %", "folder_path = get_data.get('CurrentFolder').value if(folder_path is None): folder_path = '' elif(folder_path.startswith('/')): folder_path = folder_path[1:]", "(storable.get_table(), self.name) # //$value = str_replace(\"'\", '&apos;', $value); fck_value = getattr(storable, self.get_column_name(), '')", "upload feature. 
@param req: The current request @type req: L{modu.web.app.Request} \"\"\" result, filename", "'fckeditor.js'))['']) fck_custom_config = req.get_path(self.get('fck_root', '/fck'), 'fckconfig-custom.js') fck_element_name = '%s-form[%s]' % (storable.get_table(), self.name) #", "str \"\"\" def __init__(self, **options): self.allowed_roots = { '__default__' : dict( perms =", "for key, config in options.get('allowed_roots', {}).items(): if('perms' not in config): config['perms'] = self.allowed_roots['__default__']['perms']", "None): fck_value = '' if(isinstance(fck_value, array.array)): fck_value = fck_value.tostring() else: fck_value = str(fck_value)", "type to be returned by this resource, which changes depending on the particular", "folder_path) for item in os.listdir(directory_path): if(item.startswith('.')): continue full_path = os.path.join(directory_path, item) finfo =", "relative to C{self.selected_root['root_callback']} @type folder_path: str @param new_folder_name: The name of the folder", "@type folder_path: str \"\"\" result, filename = self.handle_upload(req, folder_path) file_url = self.selected_root['url_callback'](req, folder_path,", "self.get('toolbar_set', 'Standard')), \"%s.Create();\\n\" % fck_var ]] frm(type=\"markup\", value=output) return frm def get_search_value(self, value,", "= os.stat(full_path) if(stat.S_ISREG(finfo.st_mode)): items.append(tags.Tag('File')(name=item, size=(finfo.st_size // 1024))) else: items.append(tags.Tag('Folder')(name=item)) items.sort(lambda a, b: cmp(a.attributes['name'].lower(),", "this resource, which changes depending on the particular paths accessed. @type content_type: str", "def get_selected_root(self, req): if('root_callback' in self.selected_root): return self.selected_root['root_callback'](req) return self.selected_root['root'] def prepare_quick_upload(self, req):", "new folder creation. 
@param folder_path: The current folder, relative to C{self.selected_root['root_callback']} @type folder_path:", "+ elementName + '-value-label');\\n\", \" e.val(value);\\n\", \" };\\n\", \" var filemanager = '%s';\\n\"", "% self.name)) return frm def update_storable(self, req, frm, storable): form_name = '%s-form' %", "= 1 UL_RENAME = 201 UL_INVALID_TYPE = 202 UL_ACCESS_DENIED = 203 FLD_EXISTS =", "'Folder'])] if(not folders_only): file_string = ''.join([str(t) for t in items if t.tag ==", "e.val(value);\\n\", \" e = $('#' + elementName + '-value-label');\\n\", \" e.val(value);\\n\", \" };\\n\",", "the particular paths accessed. @type content_type: str @ivar content: In most cases, the", "saves to the given folder. @param req: The current request @type req: L{modu.web.app.Request}", "def get_element(self, req, style, storable): default_value = getattr(storable, self.get_column_name(), '') frm = form.FormNode(self.name)", "The current request @type req: L{modu.web.app.Request} \"\"\" result, filename = self.handle_upload(req, self.get_selected_root(req)) file_url", "save its path to the Storable. \"\"\" implements(editable.IDatatype) def get_element(self, req, style, storable):", "if not(default_value): default_value = '(none)' frm(type='label', value=default_value) return frm filemgr_path = req.get_path('assets/fckeditor/editor/filemanager/browser/default/browser.html') assets.activate_jquery(req)", "lambda req, *path: req.get_path(*path), ), } for key, config in options.get('allowed_roots', {}).items(): if('perms'", "== 'Folder'])] if(not folders_only): file_string = ''.join([str(t) for t in items if t.tag", "def prepare_quick_upload(self, req): \"\"\" Provides support for the FCK quick upload feature. 
@param", "req.app.webroot), url_callback = lambda req, *path: req.get_path(*path), ), } for key, config in", "file_url = self.selected_root['url_callback'](req, filename) self.content = [str(tags.script(type=\"text/javascript\")[ \"window.parent.OnUploadCompleted(%s, '%s', '%s', '');\\n\" % (result,", "t.tag == 'Folder'])] if(not folders_only): file_string = ''.join([str(t) for t in items if", "UL_INVALID_TYPE): try: uploaded_file = open(destination_path, 'w') bytes = fileitem.file.read(65536) while(bytes): uploaded_file.write(bytes) bytes =", "from a given directory and save its path to the Storable. \"\"\" implements(editable.IDatatype)", "= tags.Tag('Error')(number=SUCCESS) except: content = tags.Tag('Error')(number=FLD_UNKNOWN_ERROR) return content def file_upload(self, req, folder_path): \"\"\"", "int(time.time()) filename = '.'.join(parts) result = UL_RENAME else: result = UL_INVALID_TYPE if(result !=", "to support new folder creation. @param folder_path: The current folder, relative to C{self.selected_root['root_callback']}", "tags from modu.web import resource, app SUCCESS = 0 CUSTOM_ERROR = 1 UL_RENAME", "resource, app SUCCESS = 0 CUSTOM_ERROR = 1 UL_RENAME = 201 UL_INVALID_TYPE =", "tags.script(type=\"text/javascript\", src=req.get_path('assets', 'fckeditor', 'fckeditor.js'))['']) fck_custom_config = req.get_path(self.get('fck_root', '/fck'), 'fckconfig-custom.js') fck_element_name = '%s-form[%s]' %", "= 'fckconfig-custom.js.tmpl' def get_directory_items(self, req, folder_path, folders_only): \"\"\" Used by browser code to", "% (fck_var, fck_base_path), \"%s.Value = \\\"%s\\\";\\n\" % (fck_var, fck_value), \"%s.Width = \\\"%s\\\";\\n\" %", "None if(req.postpath and req.postpath[0] == 'fckconfig-custom.js'): self.prepare_config_request(req) return if(req.postpath): root_key = req.postpath[0] else:", "def create_folder(self, req, folder_path, new_folder_name): \"\"\" Used by browser code to support new", "content def create_folder(self, req, folder_path, 
new_folder_name): \"\"\" Used by browser code to support", "folder_path, True) elif(command_name == 'GetFoldersAndFiles'): content += self.get_directory_items(req, folder_path, False) elif(command_name == 'CreateFolder'):", "import resource as admin_resource from modu.util import form, tags from modu.web import resource,", "@ivar selected_root: Details for the file upload directory. @type selected_root: dict @ivar content_type:", "os.path.join(self.get_selected_root(req), folder_path) for item in os.listdir(directory_path): if(item.startswith('.')): continue full_path = os.path.join(directory_path, item) finfo", "to, relative to C{self.selected_root['root_callback']} @type folder_path: str \"\"\" result = UL_ACCESS_DENIED data =", "= get_data.get('Type').value new_folder_name = get_data.get('NewFolderName').value folder_path = get_data.get('CurrentFolder').value if(folder_path is None): folder_path =", "self.name)) return frm def update_storable(self, req, frm, storable): form_name = '%s-form' % storable.get_table()", "tags.Tag('Files')[file_string] return content def create_folder(self, req, folder_path, new_folder_name): \"\"\" Used by browser code", "changes depending on the particular paths accessed. 
@type content_type: str @ivar content: In", "= '__default__' self.selected_root = self.allowed_roots[root_key] if not(req.user.is_allowed(self.selected_root['perms'])): app.raise403() if(req.postpath and req.postpath[0] == 'upload'):", "data = req.data if(req['REQUEST_METHOD'] == 'POST'): get_data = form.parse_query_string(req) else: get_data = data", "% self.name output = tags.script(type=\"text/javascript\")[[ \"var %s = new FCKeditor('%s');\\n\" % (fck_var, fck_element_name),", "define from modu.editable import resource as admin_resource from modu.util import form, tags from", "== 'POST'): get_data = form.parse_query_string(req) else: get_data = data command_name = get_data.get('Command').value resource_type", "self.selected_root['url_callback'](req, folder_path) content = tags.Tag('CurrentFolder')(path=folder_path, url=folder_url) if(command_name == 'GetFolders'): content += self.get_directory_items(req, folder_path,", "The current folder, relative to C{self.selected_root['root_callback']} @type folder_path: str \"\"\" result, filename =", "that displays the FCK rich text editor. \"\"\" implements(editable.IDatatype) def get_element(self, req, style,", "None when using the template to generate the FCK config file. @type content:", "result = UL_ACCESS_DENIED data = req.data fileitem = data['NewFile'] filename = fileitem.filename destination_path", "= getattr(storable, self.get_column_name(), '') frm = form.FormNode(self.name) if(style == 'listing' or self.get('read_only', False)):", "content += self.get_directory_items(req, folder_path, False) elif(command_name == 'CreateFolder'): content += self.create_folder(req, folder_path, new_folder_name)", "the file upload directory. 
@type selected_root: dict @ivar content_type: The content type to", "return admin_resource.select_template_root(req, template) def get_selected_root(self, req): if('root_callback' in self.selected_root): return self.selected_root['root_callback'](req) return self.selected_root['root']", "if t.tag == 'File']) if(file_string): content += tags.Tag('Files')[file_string] return content def create_folder(self, req,", "content_type: The content type to be returned by this resource, which changes depending", "req, *path: req.get_path(*path), ), } for key, config in options.get('allowed_roots', {}).items(): if('perms' not", "suffix = tags.input(type=\"button\", value=\"Select...\", id='%s-select-button' % self.name, onclick=\"getFile('%s')\" % self.name) req.content.report('header', tags.script(type=\"text/javascript\")[[ \"function", "$('#' + elementName + '-value-label');\\n\", \" e.val(value);\\n\", \" };\\n\", \" var filemanager =", "folders_only: If True, only list folders @type req: bool \"\"\" items = []", "resourceType=resource_type)[str(content)] self.content_type = 'text/xml' self.content = [output] def prepare_config_request(self, req): \"\"\" Uses a", "req, folder_path, new_folder_name): \"\"\" Used by browser code to support new folder creation.", "modu.persist import sql from modu.editable import define from modu.editable import resource as admin_resource", "content: str \"\"\" def __init__(self, **options): self.allowed_roots = { '__default__' : dict( perms", "\"\"\" @see: L{modu.web.resource.IContent.get_content_type()} \"\"\" return '%s; charset=UTF-8' % self.content_type def get_content(self, req): \"\"\"", "+ '-value-label');\\n\", \" e.val(value);\\n\", \" };\\n\", \" var filemanager = '%s';\\n\" % filemgr_path,", "relative to C{self.selected_root['root_callback']} @type folder_path: str \"\"\" result = UL_ACCESS_DENIED data = req.data", "'') if(fck_value is None): fck_value = '' if(isinstance(fck_value, array.array)): fck_value = fck_value.tostring() 
else:", "uploaded_file = open(destination_path, 'w') bytes = fileitem.file.read(65536) while(bytes): uploaded_file.write(bytes) bytes = fileitem.file.read(65536) uploaded_file.close()", "content def file_upload(self, req, folder_path): \"\"\" Provides support for file uploads within the", "its path to the Storable. \"\"\" implements(editable.IDatatype) def get_element(self, req, style, storable): default_value", "open(destination_path, 'w') bytes = fileitem.file.read(65536) while(bytes): uploaded_file.write(bytes) bytes = fileitem.file.read(65536) uploaded_file.close() result =", "= tags.Tag('Error')(number=FLD_EXISTS) else: try: os.mkdir(new_path) content = tags.Tag('Error')(number=SUCCESS) except: content = tags.Tag('Error')(number=FLD_UNKNOWN_ERROR) return", "= 201 UL_INVALID_TYPE = 202 UL_ACCESS_DENIED = 203 FLD_EXISTS = 101 FLD_INVALID_NAME =", "return super(FCKEditorResource, self).get_content(req) def get_template(self, req): \"\"\" @see: L{modu.web.resource.ITemplate.get_template()} \"\"\" return self.template def", "@type folder_path: str @param folders_only: If True, only list folders @type req: bool", "% self.get('fck_root', '/fck'), \" var win = window.open(filemanager+'?Connector='+connector+'&Type=Image','fileupload','width=600,height=400');\\n\", \" win.focus();\\n\", \"}\\n\", ]]) frm['label'](type=\"textfield\",", "folder, relative to C{self.selected_root['root_callback']} @type folder_path: str \"\"\" result, filename = self.handle_upload(req, folder_path)", "and req.postpath[0] == 'fckconfig-custom.js'): self.prepare_config_request(req) return if(req.postpath): root_key = req.postpath[0] else: root_key =", "if(self.content): return self.content return super(FCKEditorResource, self).get_content(req) def get_template(self, req): \"\"\" @see: L{modu.web.resource.ITemplate.get_template()} \"\"\"", "fileitem.file.read(65536) uploaded_file.close() result = SUCCESS except: import traceback print traceback.print_exc() result = UL_ACCESS_DENIED", "def 
update_storable(self, req, frm, storable): form_name = '%s-form' % storable.get_table() if(form_name in req.data):", "item in os.listdir(directory_path): if(item.startswith('.')): continue full_path = os.path.join(directory_path, item) finfo = os.stat(full_path) if(stat.S_ISREG(finfo.st_mode)):", "if(form_name in req.data): form_data = req.data[form_name] if(self.name in form_data): setattr(storable, self.get_column_name(), form_data[self.name]['value'].value) return", "req, style, storable): default_value = getattr(storable, self.get_column_name(), '') frm = form.FormNode(self.name) if(style ==", "= 101 FLD_INVALID_NAME = 102 FLD_ACCESS_DENIED = 103 FLD_UNKNOWN_ERROR = 110 class FCKEditorField(define.definition):", "in config): config['url_callback'] = self.allowed_roots['__default__']['url_callback'] self.allowed_roots[key] = config def prepare_content(self, req): \"\"\" @see:", "@see: L{modu.editable.define.definition.get_element()} \"\"\" frm = form.FormNode(self.name) if(style == 'listing'): frm(type='label', value='(html content)') return", "L{modu.web.resource.IContent.prepare_content()} \"\"\" self.content_type = 'text/html' self.content = None self.template = None if(req.postpath and", "req): \"\"\" Provides support for the FCK server-side file browser. 
@param req: The", "items.append(tags.Tag('Folder')(name=item)) items.sort(lambda a, b: cmp(a.attributes['name'].lower(), b.attributes['name'].lower())) content = tags.Tag('Folders')[''.join([str(t) for t in items", "req, folder_path): \"\"\" Provides support for file uploads within the server-side browser window.", "= self.handle_upload(req, folder_path) file_url = self.selected_root['url_callback'](req, folder_path, filename) self.content_type = 'text/html' self.content =", "= '%s-form' % storable.get_table() if(form_name in req.data): form_data = req.data[form_name] if(self.name in form_data):", "= get_data.get('NewFolderName').value folder_path = get_data.get('CurrentFolder').value if(folder_path is None): folder_path = '' elif(folder_path.startswith('/')): folder_path", "\"\"\" Uses a Cheetah template to serve up the per-site FCK configuration file.", "= '%s';\\n\" % filemgr_path, \" var connector = '%s';\\n\" % self.get('fck_root', '/fck'), \"", "result, filename = self.handle_upload(req, self.get_selected_root(req)) file_url = self.selected_root['url_callback'](req, filename) self.content = [str(tags.script(type=\"text/javascript\")[ \"window.parent.OnUploadCompleted(%s,", "to C{self.selected_root['root_callback']} @type folder_path: str \"\"\" result = UL_ACCESS_DENIED data = req.data fileitem", "'POST'): get_data = form.parse_query_string(req) else: get_data = data command_name = get_data.get('Command').value resource_type =", "A field type that displays the FCK rich text editor. 
\"\"\" implements(editable.IDatatype) def", "if(result != UL_INVALID_TYPE): try: uploaded_file = open(destination_path, 'w') bytes = fileitem.file.read(65536) while(bytes): uploaded_file.write(bytes)", "UL_INVALID_TYPE if(result != UL_INVALID_TYPE): try: uploaded_file = open(destination_path, 'w') bytes = fileitem.file.read(65536) while(bytes):", "= '' if(isinstance(fck_value, array.array)): fck_value = fck_value.tostring() else: fck_value = str(fck_value) fck_value =", "in form_data): setattr(storable, self.get_column_name(), form_data[self.name]['value'].value) return True class FCKEditorResource(resource.CheetahTemplateResource): \"\"\" Provides server-side support", "if(template is None): app.raise500(\"No template or content available.\") return admin_resource.select_template_root(req, template) def get_selected_root(self,", "class FCKEditorField(define.definition): \"\"\" A field type that displays the FCK rich text editor.", "self.selected_root['url_callback'](req, filename) self.content = [str(tags.script(type=\"text/javascript\")[ \"window.parent.OnUploadCompleted(%s, '%s', '%s', '');\\n\" % (result, file_url, filename)", "folders_only): \"\"\" Used by browser code to support directory listing. @param folder_path: The", "req.data if(req['REQUEST_METHOD'] == 'POST'): get_data = form.parse_query_string(req) else: get_data = data command_name =", "content)') return frm if(self.get('read_only', False)): frm(type='label', value=getattr(storable, self.get_column_name(), '')) return frm fck_base_path =", "= new FCKeditor('%s');\\n\" % (fck_var, fck_element_name), \"%s.Config['CustomConfigurationsPath'] = \\\"%s\\\";\\n\" % (fck_var, fck_custom_config), \"%s.BasePath", "\"\"\" result, filename = self.handle_upload(req, folder_path) file_url = self.selected_root['url_callback'](req, folder_path, filename) self.content_type =", "for details \"\"\" Contains the FCK Editor support for modu.editable. 
\"\"\" import os,", "str \"\"\" result = UL_ACCESS_DENIED data = req.data fileitem = data['NewFile'] filename =", "= 'text/xml' self.content = [output] def prepare_config_request(self, req): \"\"\" Uses a Cheetah template", "current request @type req: L{modu.web.app.Request} \"\"\" self.content_type = 'text/javascript' self.template = 'fckconfig-custom.js.tmpl' def", "== 'listing'): frm(type='label', value='(html content)') return frm if(self.get('read_only', False)): frm(type='label', value=getattr(storable, self.get_column_name(), ''))", "frm = form.FormNode(self.name) if(style == 'listing' or self.get('read_only', False)): if not(default_value): default_value =", "= $('#' + elementName + '-value-field');\\n\", \" e.val(value);\\n\", \" e = $('#' +", "= os.path.join(self.get_selected_root(req), folder_path) #prevent shenanigans new_folder_name = new_folder_name.split('/').pop() new_path = os.path.join(directory_path, new_folder_name) if(os.access(new_path,", "@param req: The current request @type req: L{modu.web.app.Request} @param folder_path: The folder to", "Provides support for the FCK server-side file browser. @param req: The current request", "return else: return output = '<?xml version=\"1.0\" encoding=\"utf-8\" ?>' output += tags.Tag('Connector')(command=command_name, resourceType=resource_type)[str(content)]" ]
[ "folder\") @click.argument(\"url\", required=False, type=str) @pass_environment def cli(ctx, url, git, branch, brick): run_cmd(ctx, 'init')", "branch, brick): run_cmd(ctx, 'init') run_cmd(ctx, 'deps.add', branch=branch, brick=brick, force=True, path=\".\", url=url) run_cmd(ctx, 'gen.up')", "\"dotbldr\") @click.command(\"new\", short_help=\"Add new dependency via url\") @click.option(\"-g\", \"--git\", flag_value=True) @click.option(\"-b\", \"--branch\", required=False,", "@click.argument(\"url\", required=False, type=str) @pass_environment def cli(ctx, url, git, branch, brick): run_cmd(ctx, 'init') run_cmd(ctx,", "click import json from bldr.cli import pass_environment, run_cmd from bldr.gen.render import render import", "bldr modules folder\") @click.argument(\"url\", required=False, type=str) @pass_environment def cli(ctx, url, git, branch, brick):", "import sys from git.objects.submodule.root import RootUpdateProgress import bldr import bldr.gen.render import bldr.util import", "@click.option(\"-g\", \"--git\", flag_value=True) @click.option(\"-b\", \"--branch\", required=False, type=str) @click.option(\"-k\", \"--brick\", flag_value=True, help=\"Add the dependency", "json from bldr.cli import pass_environment, run_cmd from bldr.gen.render import render import bldr.dep.env dotbldr_path", "run_cmd from bldr.gen.render import render import bldr.dep.env dotbldr_path = os.path.join(os.path.abspath(os.path.dirname(bldr.__file__)), \"dotbldr\") @click.command(\"new\", short_help=\"Add", "short_help=\"Add new dependency via url\") @click.option(\"-g\", \"--git\", flag_value=True) @click.option(\"-b\", \"--branch\", required=False, type=str) @click.option(\"-k\",", "@pass_environment def cli(ctx, url, git, branch, brick): run_cmd(ctx, 'init') run_cmd(ctx, 'deps.add', branch=branch, brick=brick,", "git.objects.submodule.root import RootUpdateProgress import bldr import bldr.gen.render import bldr.util import giturlparse from git", "flag_value=True) 
@click.option(\"-b\", \"--branch\", required=False, type=str) @click.option(\"-k\", \"--brick\", flag_value=True, help=\"Add the dependency to the", "the bldr modules folder\") @click.argument(\"url\", required=False, type=str) @pass_environment def cli(ctx, url, git, branch,", "the dependency to the bldr modules folder\") @click.argument(\"url\", required=False, type=str) @pass_environment def cli(ctx,", "bldr.cli import pass_environment, run_cmd from bldr.gen.render import render import bldr.dep.env dotbldr_path = os.path.join(os.path.abspath(os.path.dirname(bldr.__file__)),", "url\") @click.option(\"-g\", \"--git\", flag_value=True) @click.option(\"-b\", \"--branch\", required=False, type=str) @click.option(\"-k\", \"--brick\", flag_value=True, help=\"Add the", "import bldr import bldr.gen.render import bldr.util import giturlparse from git import Repo from", "from bldr.environment import Environment import os import sys from git.objects.submodule.root import RootUpdateProgress import", "<reponame>bldr-cmd/bldr-cmd<filename>bldr/cmd/new.py<gh_stars>0 \"\"\" `deps.get` Command \"\"\" from bldr.environment import Environment import os import sys", "`deps.get` Command \"\"\" from bldr.environment import Environment import os import sys from git.objects.submodule.root", "new dependency via url\") @click.option(\"-g\", \"--git\", flag_value=True) @click.option(\"-b\", \"--branch\", required=False, type=str) @click.option(\"-k\", \"--brick\",", "os import sys from git.objects.submodule.root import RootUpdateProgress import bldr import bldr.gen.render import bldr.util", "import pass_environment, run_cmd from bldr.gen.render import render import bldr.dep.env dotbldr_path = os.path.join(os.path.abspath(os.path.dirname(bldr.__file__)), \"dotbldr\")", "import os import sys from git.objects.submodule.root import RootUpdateProgress import bldr import bldr.gen.render import", "cli(ctx, url, git, branch, brick): run_cmd(ctx, 'init') run_cmd(ctx, 'deps.add', branch=branch, 
brick=brick, force=True, path=\".\",", "dependency via url\") @click.option(\"-g\", \"--git\", flag_value=True) @click.option(\"-b\", \"--branch\", required=False, type=str) @click.option(\"-k\", \"--brick\", flag_value=True,", "import bldr.gen.render import bldr.util import giturlparse from git import Repo from pathlib import", "bldr.gen.render import bldr.util import giturlparse from git import Repo from pathlib import Path", "import render import bldr.dep.env dotbldr_path = os.path.join(os.path.abspath(os.path.dirname(bldr.__file__)), \"dotbldr\") @click.command(\"new\", short_help=\"Add new dependency via", "from bldr.cli import pass_environment, run_cmd from bldr.gen.render import render import bldr.dep.env dotbldr_path =", "sys from git.objects.submodule.root import RootUpdateProgress import bldr import bldr.gen.render import bldr.util import giturlparse", "dotbldr_path = os.path.join(os.path.abspath(os.path.dirname(bldr.__file__)), \"dotbldr\") @click.command(\"new\", short_help=\"Add new dependency via url\") @click.option(\"-g\", \"--git\", flag_value=True)", "import bldr.dep.env dotbldr_path = os.path.join(os.path.abspath(os.path.dirname(bldr.__file__)), \"dotbldr\") @click.command(\"new\", short_help=\"Add new dependency via url\") @click.option(\"-g\",", "import Repo from pathlib import Path import click import json from bldr.cli import", "os.path.join(os.path.abspath(os.path.dirname(bldr.__file__)), \"dotbldr\") @click.command(\"new\", short_help=\"Add new dependency via url\") @click.option(\"-g\", \"--git\", flag_value=True) @click.option(\"-b\", \"--branch\",", "\"--git\", flag_value=True) @click.option(\"-b\", \"--branch\", required=False, type=str) @click.option(\"-k\", \"--brick\", flag_value=True, help=\"Add the dependency to", "def cli(ctx, url, git, branch, brick): run_cmd(ctx, 'init') run_cmd(ctx, 'deps.add', branch=branch, brick=brick, force=True,", "\"\"\" `deps.get` Command \"\"\" from bldr.environment import Environment import os import 
sys from", "required=False, type=str) @click.option(\"-k\", \"--brick\", flag_value=True, help=\"Add the dependency to the bldr modules folder\")", "via url\") @click.option(\"-g\", \"--git\", flag_value=True) @click.option(\"-b\", \"--branch\", required=False, type=str) @click.option(\"-k\", \"--brick\", flag_value=True, help=\"Add", "import json from bldr.cli import pass_environment, run_cmd from bldr.gen.render import render import bldr.dep.env", "from git.objects.submodule.root import RootUpdateProgress import bldr import bldr.gen.render import bldr.util import giturlparse from", "dependency to the bldr modules folder\") @click.argument(\"url\", required=False, type=str) @pass_environment def cli(ctx, url,", "= os.path.join(os.path.abspath(os.path.dirname(bldr.__file__)), \"dotbldr\") @click.command(\"new\", short_help=\"Add new dependency via url\") @click.option(\"-g\", \"--git\", flag_value=True) @click.option(\"-b\",", "Path import click import json from bldr.cli import pass_environment, run_cmd from bldr.gen.render import", "Command \"\"\" from bldr.environment import Environment import os import sys from git.objects.submodule.root import", "giturlparse from git import Repo from pathlib import Path import click import json", "required=False, type=str) @pass_environment def cli(ctx, url, git, branch, brick): run_cmd(ctx, 'init') run_cmd(ctx, 'deps.add',", "Environment import os import sys from git.objects.submodule.root import RootUpdateProgress import bldr import bldr.gen.render", "RootUpdateProgress import bldr import bldr.gen.render import bldr.util import giturlparse from git import Repo", "flag_value=True, help=\"Add the dependency to the bldr modules folder\") @click.argument(\"url\", required=False, type=str) @pass_environment", "bldr.gen.render import render import bldr.dep.env dotbldr_path = os.path.join(os.path.abspath(os.path.dirname(bldr.__file__)), \"dotbldr\") @click.command(\"new\", short_help=\"Add new dependency", "from bldr.gen.render 
import render import bldr.dep.env dotbldr_path = os.path.join(os.path.abspath(os.path.dirname(bldr.__file__)), \"dotbldr\") @click.command(\"new\", short_help=\"Add new", "from git import Repo from pathlib import Path import click import json from", "bldr.util import giturlparse from git import Repo from pathlib import Path import click", "pass_environment, run_cmd from bldr.gen.render import render import bldr.dep.env dotbldr_path = os.path.join(os.path.abspath(os.path.dirname(bldr.__file__)), \"dotbldr\") @click.command(\"new\",", "import Path import click import json from bldr.cli import pass_environment, run_cmd from bldr.gen.render", "git, branch, brick): run_cmd(ctx, 'init') run_cmd(ctx, 'deps.add', branch=branch, brick=brick, force=True, path=\".\", url=url) run_cmd(ctx,", "pathlib import Path import click import json from bldr.cli import pass_environment, run_cmd from", "type=str) @pass_environment def cli(ctx, url, git, branch, brick): run_cmd(ctx, 'init') run_cmd(ctx, 'deps.add', branch=branch,", "import giturlparse from git import Repo from pathlib import Path import click import", "\"\"\" from bldr.environment import Environment import os import sys from git.objects.submodule.root import RootUpdateProgress", "bldr import bldr.gen.render import bldr.util import giturlparse from git import Repo from pathlib", "type=str) @click.option(\"-k\", \"--brick\", flag_value=True, help=\"Add the dependency to the bldr modules folder\") @click.argument(\"url\",", "import bldr.util import giturlparse from git import Repo from pathlib import Path import", "bldr.dep.env dotbldr_path = os.path.join(os.path.abspath(os.path.dirname(bldr.__file__)), \"dotbldr\") @click.command(\"new\", short_help=\"Add new dependency via url\") @click.option(\"-g\", \"--git\",", "from pathlib import Path import click import json from bldr.cli import pass_environment, run_cmd", "help=\"Add the dependency to the bldr modules folder\") @click.argument(\"url\", required=False, type=str) 
@pass_environment def", "import RootUpdateProgress import bldr import bldr.gen.render import bldr.util import giturlparse from git import", "import Environment import os import sys from git.objects.submodule.root import RootUpdateProgress import bldr import", "bldr.environment import Environment import os import sys from git.objects.submodule.root import RootUpdateProgress import bldr", "git import Repo from pathlib import Path import click import json from bldr.cli", "to the bldr modules folder\") @click.argument(\"url\", required=False, type=str) @pass_environment def cli(ctx, url, git,", "url, git, branch, brick): run_cmd(ctx, 'init') run_cmd(ctx, 'deps.add', branch=branch, brick=brick, force=True, path=\".\", url=url)", "Repo from pathlib import Path import click import json from bldr.cli import pass_environment,", "\"--branch\", required=False, type=str) @click.option(\"-k\", \"--brick\", flag_value=True, help=\"Add the dependency to the bldr modules", "modules folder\") @click.argument(\"url\", required=False, type=str) @pass_environment def cli(ctx, url, git, branch, brick): run_cmd(ctx,", "@click.option(\"-b\", \"--branch\", required=False, type=str) @click.option(\"-k\", \"--brick\", flag_value=True, help=\"Add the dependency to the bldr", "render import bldr.dep.env dotbldr_path = os.path.join(os.path.abspath(os.path.dirname(bldr.__file__)), \"dotbldr\") @click.command(\"new\", short_help=\"Add new dependency via url\")", "@click.command(\"new\", short_help=\"Add new dependency via url\") @click.option(\"-g\", \"--git\", flag_value=True) @click.option(\"-b\", \"--branch\", required=False, type=str)", "@click.option(\"-k\", \"--brick\", flag_value=True, help=\"Add the dependency to the bldr modules folder\") @click.argument(\"url\", required=False,", "import click import json from bldr.cli import pass_environment, run_cmd from bldr.gen.render import render", "\"--brick\", flag_value=True, help=\"Add the dependency to the bldr modules folder\") 
@click.argument(\"url\", required=False, type=str)" ]
[ "def rmse(data, speaker, ref_dir=None): 'Vertical and horizontal localization accuracies were quantified by computing", "azimuth_single = np.arctan(np.sin(azimuth_double) / np.cos(azimuth_double) / np.cos(elevation_double)) return np.rad2deg(azimuth_single) def single_pole_to_polar(azimuth, elevation): phi", "the discrep- ancies between perceived and physical locations (RMSE, Hartmann, 1983; Savel, 2009).'", "1983; Savel, 2009).' if ref_dir is None: ref_dir = mean_dir(data, speaker) idx =", "= np.sin(theta)*np.cos(phi) y = np.sin(theta)*np.sin(phi) z = np.cos(theta) return x, y, z def", "if ref_dir is None: ref_dir = mean_dir(data, speaker) idx = np.where(data[:,1] == speaker)", "discrep- ancies between perceived and physical locations (RMSE, Hartmann, 1983; Savel, 2009).' if", "defined as the slope of the linear regression of perceived versus physical elevations", "/ np.cos(elevation_double)) return np.rad2deg(azimuth_single) def single_pole_to_polar(azimuth, elevation): phi = -1 * azimuth theta", "the elevations for the speakers in the presented sequence regression = stats.linregress(eles, elevation_seq)", "polar_to_cartesian(phi, theta): phi, theta = np.deg2rad(phi), np.deg2rad(theta) x = np.sin(theta)*np.cos(phi) y = np.sin(theta)*np.sin(phi)", "return np.sqrt((dist**2).mean()) def eg(data, speaker_positions=None): ''' Vertical localization performance was also quantified by", "while random elevation responses result in an EG of 0.''' eles = data[:,3]", "mean_dir(data, speaker): # use vector addition with uncorrected angles: # sines, cosines =", "an EG of 0.''' eles = data[:,3] if speaker_positions is None: return np.percentile(eles,", "def polar_to_single_pole(phi, theta): azimuth = phi * -1 elevation = theta + 90", "= data[idx,2:4] - ref_dir return np.sqrt((diffs**2).sum(axis=2)).mean() def rmse(data, speaker, ref_dir=None): 'Vertical and horizontal", "elevation = theta + 90 return azimuth, elevation def polar_to_cartesian(phi, theta): phi, theta", "idx = 
np.where(data[:, 1] == speaker) return data[idx, 2:4].mean(axis=1) def mad(data, speaker, ref_dir=None):", "theta = elevation - 90 return phi, theta def polar_to_single_pole(phi, theta): azimuth =", "cosines = _sines_cosines(data, speaker) # return numpy.rad2deg(sines.sum(axis=1) / cosines.sum(axis=1)).flatten() # use regular addition", "data[idx,2:4] - ref_dir return np.sqrt((diffs**2).sum(axis=2)).mean() def rmse(data, speaker, ref_dir=None): 'Vertical and horizontal localization", "azimuth = phi * -1 elevation = theta + 90 return azimuth, elevation", "quantified by the EG, defined as the slope of the linear regression of", "= np.arctan(np.sin(azimuth_double) / np.cos(azimuth_double) / np.cos(elevation_double)) return np.rad2deg(azimuth_single) def single_pole_to_polar(azimuth, elevation): phi =", "Perfect localization corresponds to an EG of 1, while random elevation responses result", "np.sin(theta)*np.cos(phi) y = np.sin(theta)*np.sin(phi) z = np.cos(theta) return x, y, z def mean_dir(data,", "0.''' eles = data[:,3] if speaker_positions is None: return np.percentile(eles, 75) - np.percentile(eles,", "angles: idx = np.where(data[:, 1] == speaker) return data[idx, 2:4].mean(axis=1) def mad(data, speaker,", "locations (RMSE, Hartmann, 1983; Savel, 2009).' 
if ref_dir is None: ref_dir = mean_dir(data,", "scipy import stats def double_to_single_pole(azimuth_double, elevation_double): azimuth_double, elevation_double = np.deg2rad(azimuth_double), np.deg2rad(elevation_double) azimuth_single =", "* azimuth theta = elevation - 90 return phi, theta def polar_to_single_pole(phi, theta):", "# use vector addition with uncorrected angles: # sines, cosines = _sines_cosines(data, speaker)", "elevation def polar_to_cartesian(phi, theta): phi, theta = np.deg2rad(phi), np.deg2rad(theta) x = np.sin(theta)*np.cos(phi) y", "- ref_dir return np.sqrt((diffs**2).sum(axis=2)).mean() def rmse(data, speaker, ref_dir=None): 'Vertical and horizontal localization accuracies", "np.deg2rad(theta) x = np.sin(theta)*np.cos(phi) y = np.sin(theta)*np.sin(phi) z = np.cos(theta) return x, y,", "data[:,3] if speaker_positions is None: return np.percentile(eles, 75) - np.percentile(eles, 25) speaker_seq =", "def eg(data, speaker_positions=None): ''' Vertical localization performance was also quantified by the EG,", "is None: ref_dir = mean_dir(data, speaker) idx = np.where(data[:,1] == speaker) diffs =", "'Mean absolute difference between reference directions and pointed directions' if ref_dir is None:", "numbers elevation_seq = speaker_positions[speaker_seq,1] # get the elevations for the speakers in the", "localization performance was also quantified by the EG, defined as the slope of", "np.deg2rad(azimuth_double), np.deg2rad(elevation_double) azimuth_single = np.arctan(np.sin(azimuth_double) / np.cos(azimuth_double) / np.cos(elevation_double)) return np.rad2deg(azimuth_single) def single_pole_to_polar(azimuth,", "horizontal localization accuracies were quantified by computing the root mean square of the", "<filename>freefield/analysis.py<gh_stars>1-10 import numpy as np from scipy import stats def double_to_single_pole(azimuth_double, elevation_double): azimuth_double,", "directions' if ref_dir is None: ref_dir = mean_dir(data, speaker) idx = 
np.where(data[:,1] ==", "is None: return np.percentile(eles, 75) - np.percentile(eles, 25) speaker_seq = data[:,1].astype(int) # presented", "= elevation - 90 return phi, theta def polar_to_single_pole(phi, theta): azimuth = phi", "/ np.cos(azimuth_double) / np.cos(elevation_double)) return np.rad2deg(azimuth_single) def single_pole_to_polar(azimuth, elevation): phi = -1 *", "= np.deg2rad(phi), np.deg2rad(theta) x = np.sin(theta)*np.cos(phi) y = np.sin(theta)*np.sin(phi) z = np.cos(theta) return", "np.arctan(np.sin(azimuth_double) / np.cos(azimuth_double) / np.cos(elevation_double)) return np.rad2deg(azimuth_single) def single_pole_to_polar(azimuth, elevation): phi = -1", "also quantified by the EG, defined as the slope of the linear regression", "return numpy.rad2deg(sines.sum(axis=1) / cosines.sum(axis=1)).flatten() # use regular addition with corrected angles: idx =", "stats def double_to_single_pole(azimuth_double, elevation_double): azimuth_double, elevation_double = np.deg2rad(azimuth_double), np.deg2rad(elevation_double) azimuth_single = np.arctan(np.sin(azimuth_double) /", "of perceived versus physical elevations (Hofman et al., 1998). Perfect localization corresponds to", "Savel, 2009).' if ref_dir is None: ref_dir = mean_dir(data, speaker) idx = np.where(data[:,1]", "of 1, while random elevation responses result in an EG of 0.''' eles", "to an EG of 1, while random elevation responses result in an EG", "import numpy as np from scipy import stats def double_to_single_pole(azimuth_double, elevation_double): azimuth_double, elevation_double", "by the EG, defined as the slope of the linear regression of perceived", "return np.rad2deg(azimuth_single) def single_pole_to_polar(azimuth, elevation): phi = -1 * azimuth theta = elevation", "Hartmann, 1983; Savel, 2009).' 
if ref_dir is None: ref_dir = mean_dir(data, speaker) idx", "with corrected angles: idx = np.where(data[:, 1] == speaker) return data[idx, 2:4].mean(axis=1) def", "speaker_positions=None): ''' Vertical localization performance was also quantified by the EG, defined as", "EG of 1, while random elevation responses result in an EG of 0.'''", "speaker_seq = data[:,1].astype(int) # presented sequence of speaker numbers elevation_seq = speaker_positions[speaker_seq,1] #", "speaker, ref_dir=None): 'Mean absolute difference between reference directions and pointed directions' if ref_dir", "y = np.sin(theta)*np.sin(phi) z = np.cos(theta) return x, y, z def mean_dir(data, speaker):", "2:4].mean(axis=1) def mad(data, speaker, ref_dir=None): 'Mean absolute difference between reference directions and pointed", "localization accuracies were quantified by computing the root mean square of the discrep-", "single_pole_to_polar(azimuth, elevation): phi = -1 * azimuth theta = elevation - 90 return", "def mean_dir(data, speaker): # use vector addition with uncorrected angles: # sines, cosines", "= data[idx,2:4] - ref_dir dist = np.sqrt((diffs**2).sum(axis=2)) return np.sqrt((dist**2).mean()) def eg(data, speaker_positions=None): '''", "vector addition with uncorrected angles: # sines, cosines = _sines_cosines(data, speaker) # return", "use regular addition with corrected angles: idx = np.where(data[:, 1] == speaker) return", "mean_dir(data, speaker) idx = np.where(data[:,1] == speaker) diffs = data[idx,2:4] - ref_dir return", "return x, y, z def mean_dir(data, speaker): # use vector addition with uncorrected", "as np from scipy import stats def double_to_single_pole(azimuth_double, elevation_double): azimuth_double, elevation_double = np.deg2rad(azimuth_double),", "np.deg2rad(phi), np.deg2rad(theta) x = np.sin(theta)*np.cos(phi) y = np.sin(theta)*np.sin(phi) z = np.cos(theta) return x,", "return data[idx, 2:4].mean(axis=1) def mad(data, speaker, ref_dir=None): 'Mean absolute 
difference between reference directions", "def polar_to_cartesian(phi, theta): phi, theta = np.deg2rad(phi), np.deg2rad(theta) x = np.sin(theta)*np.cos(phi) y =", "2009).' if ref_dir is None: ref_dir = mean_dir(data, speaker) idx = np.where(data[:,1] ==", "Vertical localization performance was also quantified by the EG, defined as the slope", "polar_to_single_pole(phi, theta): azimuth = phi * -1 elevation = theta + 90 return", "from scipy import stats def double_to_single_pole(azimuth_double, elevation_double): azimuth_double, elevation_double = np.deg2rad(azimuth_double), np.deg2rad(elevation_double) azimuth_single", "np.sin(theta)*np.sin(phi) z = np.cos(theta) return x, y, z def mean_dir(data, speaker): # use", "idx = np.where(data[:,1] == speaker) diffs = data[idx,2:4] - ref_dir dist = np.sqrt((diffs**2).sum(axis=2))", "regression of perceived versus physical elevations (Hofman et al., 1998). Perfect localization corresponds", "np.deg2rad(elevation_double) azimuth_single = np.arctan(np.sin(azimuth_double) / np.cos(azimuth_double) / np.cos(elevation_double)) return np.rad2deg(azimuth_single) def single_pole_to_polar(azimuth, elevation):", "-1 elevation = theta + 90 return azimuth, elevation def polar_to_cartesian(phi, theta): phi,", "= np.deg2rad(azimuth_double), np.deg2rad(elevation_double) azimuth_single = np.arctan(np.sin(azimuth_double) / np.cos(azimuth_double) / np.cos(elevation_double)) return np.rad2deg(azimuth_single) def", "phi, theta = np.deg2rad(phi), np.deg2rad(theta) x = np.sin(theta)*np.cos(phi) y = np.sin(theta)*np.sin(phi) z =", "uncorrected angles: # sines, cosines = _sines_cosines(data, speaker) # return numpy.rad2deg(sines.sum(axis=1) / cosines.sum(axis=1)).flatten()", "square of the discrep- ancies between perceived and physical locations (RMSE, Hartmann, 1983;", "localization corresponds to an EG of 1, while random elevation responses result in", "the EG, defined as the slope of the linear regression of perceived versus", "if 
speaker_positions is None: return np.percentile(eles, 75) - np.percentile(eles, 25) speaker_seq = data[:,1].astype(int)", "= _sines_cosines(data, speaker) # return numpy.rad2deg(sines.sum(axis=1) / cosines.sum(axis=1)).flatten() # use regular addition with", "def single_pole_to_polar(azimuth, elevation): phi = -1 * azimuth theta = elevation - 90", "mean square of the discrep- ancies between perceived and physical locations (RMSE, Hartmann,", "addition with corrected angles: idx = np.where(data[:, 1] == speaker) return data[idx, 2:4].mean(axis=1)", "None: ref_dir = mean_dir(data, speaker) idx = np.where(data[:,1] == speaker) diffs = data[idx,2:4]", "result in an EG of 0.''' eles = data[:,3] if speaker_positions is None:", "speaker) diffs = data[idx,2:4] - ref_dir dist = np.sqrt((diffs**2).sum(axis=2)) return np.sqrt((dist**2).mean()) def eg(data,", "ref_dir is None: ref_dir = mean_dir(data, speaker) idx = np.where(data[:,1] == speaker) diffs", "regular addition with corrected angles: idx = np.where(data[:, 1] == speaker) return data[idx,", "(Hofman et al., 1998). Perfect localization corresponds to an EG of 1, while", "phi = -1 * azimuth theta = elevation - 90 return phi, theta", "elevation - 90 return phi, theta def polar_to_single_pole(phi, theta): azimuth = phi *", "data[idx, 2:4].mean(axis=1) def mad(data, speaker, ref_dir=None): 'Mean absolute difference between reference directions and", "speaker) diffs = data[idx,2:4] - ref_dir return np.sqrt((diffs**2).sum(axis=2)).mean() def rmse(data, speaker, ref_dir=None): 'Vertical", "of the linear regression of perceived versus physical elevations (Hofman et al., 1998).", "al., 1998). Perfect localization corresponds to an EG of 1, while random elevation", "= phi * -1 elevation = theta + 90 return azimuth, elevation def", "between reference directions and pointed directions' if ref_dir is None: ref_dir = mean_dir(data,", "ancies between perceived and physical locations (RMSE, Hartmann, 1983; Savel, 2009).' 
if ref_dir", "theta): azimuth = phi * -1 elevation = theta + 90 return azimuth,", "phi, theta def polar_to_single_pole(phi, theta): azimuth = phi * -1 elevation = theta", "ref_dir return np.sqrt((diffs**2).sum(axis=2)).mean() def rmse(data, speaker, ref_dir=None): 'Vertical and horizontal localization accuracies were", "np.sqrt((dist**2).mean()) def eg(data, speaker_positions=None): ''' Vertical localization performance was also quantified by the", "25) speaker_seq = data[:,1].astype(int) # presented sequence of speaker numbers elevation_seq = speaker_positions[speaker_seq,1]", "elevation_seq = speaker_positions[speaker_seq,1] # get the elevations for the speakers in the presented", "mad(data, speaker, ref_dir=None): 'Mean absolute difference between reference directions and pointed directions' if", "azimuth, elevation def polar_to_cartesian(phi, theta): phi, theta = np.deg2rad(phi), np.deg2rad(theta) x = np.sin(theta)*np.cos(phi)", "z def mean_dir(data, speaker): # use vector addition with uncorrected angles: # sines,", "90 return azimuth, elevation def polar_to_cartesian(phi, theta): phi, theta = np.deg2rad(phi), np.deg2rad(theta) x", "cosines.sum(axis=1)).flatten() # use regular addition with corrected angles: idx = np.where(data[:, 1] ==", "= mean_dir(data, speaker) idx = np.where(data[:,1] == speaker) diffs = data[idx,2:4] - ref_dir", "75) - np.percentile(eles, 25) speaker_seq = data[:,1].astype(int) # presented sequence of speaker numbers", "for the speakers in the presented sequence regression = stats.linregress(eles, elevation_seq) return regression.slope", "between perceived and physical locations (RMSE, Hartmann, 1983; Savel, 2009).' 
if ref_dir is", "were quantified by computing the root mean square of the discrep- ancies between", "elevation responses result in an EG of 0.''' eles = data[:,3] if speaker_positions", "= np.where(data[:,1] == speaker) diffs = data[idx,2:4] - ref_dir return np.sqrt((diffs**2).sum(axis=2)).mean() def rmse(data,", "slope of the linear regression of perceived versus physical elevations (Hofman et al.,", "= np.where(data[:, 1] == speaker) return data[idx, 2:4].mean(axis=1) def mad(data, speaker, ref_dir=None): 'Mean", "EG, defined as the slope of the linear regression of perceived versus physical", "speaker) # return numpy.rad2deg(sines.sum(axis=1) / cosines.sum(axis=1)).flatten() # use regular addition with corrected angles:", "presented sequence of speaker numbers elevation_seq = speaker_positions[speaker_seq,1] # get the elevations for", "np.cos(azimuth_double) / np.cos(elevation_double)) return np.rad2deg(azimuth_single) def single_pole_to_polar(azimuth, elevation): phi = -1 * azimuth", "= np.sqrt((diffs**2).sum(axis=2)) return np.sqrt((dist**2).mean()) def eg(data, speaker_positions=None): ''' Vertical localization performance was also", "reference directions and pointed directions' if ref_dir is None: ref_dir = mean_dir(data, speaker)", "versus physical elevations (Hofman et al., 1998). Perfect localization corresponds to an EG", "= data[:,1].astype(int) # presented sequence of speaker numbers elevation_seq = speaker_positions[speaker_seq,1] # get", "z = np.cos(theta) return x, y, z def mean_dir(data, speaker): # use vector", "phi * -1 elevation = theta + 90 return azimuth, elevation def polar_to_cartesian(phi,", "theta = np.deg2rad(phi), np.deg2rad(theta) x = np.sin(theta)*np.cos(phi) y = np.sin(theta)*np.sin(phi) z = np.cos(theta)", "/ cosines.sum(axis=1)).flatten() # use regular addition with corrected angles: idx = np.where(data[:, 1]", "perceived versus physical elevations (Hofman et al., 1998). 
Perfect localization corresponds to an", "ref_dir=None): 'Vertical and horizontal localization accuracies were quantified by computing the root mean", "np.sqrt((diffs**2).sum(axis=2)).mean() def rmse(data, speaker, ref_dir=None): 'Vertical and horizontal localization accuracies were quantified by", "np.percentile(eles, 25) speaker_seq = data[:,1].astype(int) # presented sequence of speaker numbers elevation_seq =", "of speaker numbers elevation_seq = speaker_positions[speaker_seq,1] # get the elevations for the speakers", "the linear regression of perceived versus physical elevations (Hofman et al., 1998). Perfect", "speaker) idx = np.where(data[:,1] == speaker) diffs = data[idx,2:4] - ref_dir return np.sqrt((diffs**2).sum(axis=2)).mean()", "speaker, ref_dir=None): 'Vertical and horizontal localization accuracies were quantified by computing the root", "np.where(data[:,1] == speaker) diffs = data[idx,2:4] - ref_dir dist = np.sqrt((diffs**2).sum(axis=2)) return np.sqrt((dist**2).mean())", "np.where(data[:,1] == speaker) diffs = data[idx,2:4] - ref_dir return np.sqrt((diffs**2).sum(axis=2)).mean() def rmse(data, speaker,", "speaker_positions is None: return np.percentile(eles, 75) - np.percentile(eles, 25) speaker_seq = data[:,1].astype(int) #", "rmse(data, speaker, ref_dir=None): 'Vertical and horizontal localization accuracies were quantified by computing the", "None: return np.percentile(eles, 75) - np.percentile(eles, 25) speaker_seq = data[:,1].astype(int) # presented sequence", "et al., 1998). 
Perfect localization corresponds to an EG of 1, while random", "''' Vertical localization performance was also quantified by the EG, defined as the", "# return numpy.rad2deg(sines.sum(axis=1) / cosines.sum(axis=1)).flatten() # use regular addition with corrected angles: idx", "== speaker) return data[idx, 2:4].mean(axis=1) def mad(data, speaker, ref_dir=None): 'Mean absolute difference between", "1, while random elevation responses result in an EG of 0.''' eles =", "-1 * azimuth theta = elevation - 90 return phi, theta def polar_to_single_pole(phi,", "= -1 * azimuth theta = elevation - 90 return phi, theta def", "was also quantified by the EG, defined as the slope of the linear", "np.percentile(eles, 75) - np.percentile(eles, 25) speaker_seq = data[:,1].astype(int) # presented sequence of speaker", "speaker) return data[idx, 2:4].mean(axis=1) def mad(data, speaker, ref_dir=None): 'Mean absolute difference between reference", "x, y, z def mean_dir(data, speaker): # use vector addition with uncorrected angles:", "perceived and physical locations (RMSE, Hartmann, 1983; Savel, 2009).' 
if ref_dir is None:", "directions and pointed directions' if ref_dir is None: ref_dir = mean_dir(data, speaker) idx", "elevation_double = np.deg2rad(azimuth_double), np.deg2rad(elevation_double) azimuth_single = np.arctan(np.sin(azimuth_double) / np.cos(azimuth_double) / np.cos(elevation_double)) return np.rad2deg(azimuth_single)", "and pointed directions' if ref_dir is None: ref_dir = mean_dir(data, speaker) idx =", "azimuth theta = elevation - 90 return phi, theta def polar_to_single_pole(phi, theta): azimuth", "= np.where(data[:,1] == speaker) diffs = data[idx,2:4] - ref_dir dist = np.sqrt((diffs**2).sum(axis=2)) return", "eg(data, speaker_positions=None): ''' Vertical localization performance was also quantified by the EG, defined", "== speaker) diffs = data[idx,2:4] - ref_dir return np.sqrt((diffs**2).sum(axis=2)).mean() def rmse(data, speaker, ref_dir=None):", "90 return phi, theta def polar_to_single_pole(phi, theta): azimuth = phi * -1 elevation", "addition with uncorrected angles: # sines, cosines = _sines_cosines(data, speaker) # return numpy.rad2deg(sines.sum(axis=1)", "sines, cosines = _sines_cosines(data, speaker) # return numpy.rad2deg(sines.sum(axis=1) / cosines.sum(axis=1)).flatten() # use regular", "ref_dir = mean_dir(data, speaker) idx = np.where(data[:,1] == speaker) diffs = data[idx,2:4] -", "theta): phi, theta = np.deg2rad(phi), np.deg2rad(theta) x = np.sin(theta)*np.cos(phi) y = np.sin(theta)*np.sin(phi) z", "'Vertical and horizontal localization accuracies were quantified by computing the root mean square", "theta + 90 return azimuth, elevation def polar_to_cartesian(phi, theta): phi, theta = np.deg2rad(phi),", "(RMSE, Hartmann, 1983; Savel, 2009).' 
if ref_dir is None: ref_dir = mean_dir(data, speaker)", "== speaker) diffs = data[idx,2:4] - ref_dir dist = np.sqrt((diffs**2).sum(axis=2)) return np.sqrt((dist**2).mean()) def", "1] == speaker) return data[idx, 2:4].mean(axis=1) def mad(data, speaker, ref_dir=None): 'Mean absolute difference", "corrected angles: idx = np.where(data[:, 1] == speaker) return data[idx, 2:4].mean(axis=1) def mad(data,", "np.sqrt((diffs**2).sum(axis=2)) return np.sqrt((dist**2).mean()) def eg(data, speaker_positions=None): ''' Vertical localization performance was also quantified", "as the slope of the linear regression of perceived versus physical elevations (Hofman", "use vector addition with uncorrected angles: # sines, cosines = _sines_cosines(data, speaker) #", "an EG of 1, while random elevation responses result in an EG of", "physical elevations (Hofman et al., 1998). Perfect localization corresponds to an EG of", "# presented sequence of speaker numbers elevation_seq = speaker_positions[speaker_seq,1] # get the elevations", "_sines_cosines(data, speaker) # return numpy.rad2deg(sines.sum(axis=1) / cosines.sum(axis=1)).flatten() # use regular addition with corrected", "= speaker_positions[speaker_seq,1] # get the elevations for the speakers in the presented sequence", "import stats def double_to_single_pole(azimuth_double, elevation_double): azimuth_double, elevation_double = np.deg2rad(azimuth_double), np.deg2rad(elevation_double) azimuth_single = np.arctan(np.sin(azimuth_double)", "mean_dir(data, speaker) idx = np.where(data[:,1] == speaker) diffs = data[idx,2:4] - ref_dir dist", "pointed directions' if ref_dir is None: ref_dir = mean_dir(data, speaker) idx = np.where(data[:,1]", "numpy.rad2deg(sines.sum(axis=1) / cosines.sum(axis=1)).flatten() # use regular addition with corrected angles: idx = np.where(data[:,", "# get the elevations for the speakers in the presented sequence regression =", "and physical locations (RMSE, Hartmann, 1983; Savel, 2009).' 
if ref_dir is None: ref_dir", "return azimuth, elevation def polar_to_cartesian(phi, theta): phi, theta = np.deg2rad(phi), np.deg2rad(theta) x =", "of the discrep- ancies between perceived and physical locations (RMSE, Hartmann, 1983; Savel,", "+ 90 return azimuth, elevation def polar_to_cartesian(phi, theta): phi, theta = np.deg2rad(phi), np.deg2rad(theta)", "return phi, theta def polar_to_single_pole(phi, theta): azimuth = phi * -1 elevation =", "- ref_dir dist = np.sqrt((diffs**2).sum(axis=2)) return np.sqrt((dist**2).mean()) def eg(data, speaker_positions=None): ''' Vertical localization", "def mad(data, speaker, ref_dir=None): 'Mean absolute difference between reference directions and pointed directions'", "of 0.''' eles = data[:,3] if speaker_positions is None: return np.percentile(eles, 75) -", "eles = data[:,3] if speaker_positions is None: return np.percentile(eles, 75) - np.percentile(eles, 25)", "np from scipy import stats def double_to_single_pole(azimuth_double, elevation_double): azimuth_double, elevation_double = np.deg2rad(azimuth_double), np.deg2rad(elevation_double)", "theta def polar_to_single_pole(phi, theta): azimuth = phi * -1 elevation = theta +", "double_to_single_pole(azimuth_double, elevation_double): azimuth_double, elevation_double = np.deg2rad(azimuth_double), np.deg2rad(elevation_double) azimuth_single = np.arctan(np.sin(azimuth_double) / np.cos(azimuth_double) /", "= np.cos(theta) return x, y, z def mean_dir(data, speaker): # use vector addition", "the root mean square of the discrep- ancies between perceived and physical locations", "EG of 0.''' eles = data[:,3] if speaker_positions is None: return np.percentile(eles, 75)", "with uncorrected angles: # sines, cosines = _sines_cosines(data, speaker) # return numpy.rad2deg(sines.sum(axis=1) /", "by computing the root mean square of the discrep- ancies between perceived and", "random elevation responses result in an EG of 0.''' eles = data[:,3] if", "quantified by computing the root mean 
square of the discrep- ancies between perceived", "- 90 return phi, theta def polar_to_single_pole(phi, theta): azimuth = phi * -1", "ref_dir=None): 'Mean absolute difference between reference directions and pointed directions' if ref_dir is", "linear regression of perceived versus physical elevations (Hofman et al., 1998). Perfect localization", "absolute difference between reference directions and pointed directions' if ref_dir is None: ref_dir", "elevations (Hofman et al., 1998). Perfect localization corresponds to an EG of 1,", "* -1 elevation = theta + 90 return azimuth, elevation def polar_to_cartesian(phi, theta):", "def double_to_single_pole(azimuth_double, elevation_double): azimuth_double, elevation_double = np.deg2rad(azimuth_double), np.deg2rad(elevation_double) azimuth_single = np.arctan(np.sin(azimuth_double) / np.cos(azimuth_double)", "x = np.sin(theta)*np.cos(phi) y = np.sin(theta)*np.sin(phi) z = np.cos(theta) return x, y, z", "angles: # sines, cosines = _sines_cosines(data, speaker) # return numpy.rad2deg(sines.sum(axis=1) / cosines.sum(axis=1)).flatten() #", "1998). 
Perfect localization corresponds to an EG of 1, while random elevation responses", "np.cos(theta) return x, y, z def mean_dir(data, speaker): # use vector addition with", "root mean square of the discrep- ancies between perceived and physical locations (RMSE,", "numpy as np from scipy import stats def double_to_single_pole(azimuth_double, elevation_double): azimuth_double, elevation_double =", "the slope of the linear regression of perceived versus physical elevations (Hofman et", "elevations for the speakers in the presented sequence regression = stats.linregress(eles, elevation_seq) return", "# sines, cosines = _sines_cosines(data, speaker) # return numpy.rad2deg(sines.sum(axis=1) / cosines.sum(axis=1)).flatten() # use", "np.where(data[:, 1] == speaker) return data[idx, 2:4].mean(axis=1) def mad(data, speaker, ref_dir=None): 'Mean absolute", "speaker) idx = np.where(data[:,1] == speaker) diffs = data[idx,2:4] - ref_dir dist =", "np.cos(elevation_double)) return np.rad2deg(azimuth_single) def single_pole_to_polar(azimuth, elevation): phi = -1 * azimuth theta =", "azimuth_double, elevation_double = np.deg2rad(azimuth_double), np.deg2rad(elevation_double) azimuth_single = np.arctan(np.sin(azimuth_double) / np.cos(azimuth_double) / np.cos(elevation_double)) return", "computing the root mean square of the discrep- ancies between perceived and physical", "np.rad2deg(azimuth_single) def single_pole_to_polar(azimuth, elevation): phi = -1 * azimuth theta = elevation -", "return np.percentile(eles, 75) - np.percentile(eles, 25) speaker_seq = data[:,1].astype(int) # presented sequence of", "speaker_positions[speaker_seq,1] # get the elevations for the speakers in the presented sequence regression", "responses result in an EG of 0.''' eles = data[:,3] if speaker_positions is", "sequence of speaker numbers elevation_seq = speaker_positions[speaker_seq,1] # get the elevations for the", "difference between reference directions and pointed directions' if ref_dir is None: ref_dir 
=", "speaker numbers elevation_seq = speaker_positions[speaker_seq,1] # get the elevations for the speakers in", "corresponds to an EG of 1, while random elevation responses result in an", "= theta + 90 return azimuth, elevation def polar_to_cartesian(phi, theta): phi, theta =", "data[:,1].astype(int) # presented sequence of speaker numbers elevation_seq = speaker_positions[speaker_seq,1] # get the", "data[idx,2:4] - ref_dir dist = np.sqrt((diffs**2).sum(axis=2)) return np.sqrt((dist**2).mean()) def eg(data, speaker_positions=None): ''' Vertical", "speaker): # use vector addition with uncorrected angles: # sines, cosines = _sines_cosines(data,", "elevation): phi = -1 * azimuth theta = elevation - 90 return phi,", "idx = np.where(data[:,1] == speaker) diffs = data[idx,2:4] - ref_dir return np.sqrt((diffs**2).sum(axis=2)).mean() def", "= np.sin(theta)*np.sin(phi) z = np.cos(theta) return x, y, z def mean_dir(data, speaker): #", "- np.percentile(eles, 25) speaker_seq = data[:,1].astype(int) # presented sequence of speaker numbers elevation_seq", "dist = np.sqrt((diffs**2).sum(axis=2)) return np.sqrt((dist**2).mean()) def eg(data, speaker_positions=None): ''' Vertical localization performance was", "get the elevations for the speakers in the presented sequence regression = stats.linregress(eles,", "performance was also quantified by the EG, defined as the slope of the", "elevation_double): azimuth_double, elevation_double = np.deg2rad(azimuth_double), np.deg2rad(elevation_double) azimuth_single = np.arctan(np.sin(azimuth_double) / np.cos(azimuth_double) / np.cos(elevation_double))", "and horizontal localization accuracies were quantified by computing the root mean square of", "physical locations (RMSE, Hartmann, 1983; Savel, 2009).' 
if ref_dir is None: ref_dir =", "in an EG of 0.''' eles = data[:,3] if speaker_positions is None: return", "ref_dir dist = np.sqrt((diffs**2).sum(axis=2)) return np.sqrt((dist**2).mean()) def eg(data, speaker_positions=None): ''' Vertical localization performance", "= data[:,3] if speaker_positions is None: return np.percentile(eles, 75) - np.percentile(eles, 25) speaker_seq", "accuracies were quantified by computing the root mean square of the discrep- ancies", "diffs = data[idx,2:4] - ref_dir return np.sqrt((diffs**2).sum(axis=2)).mean() def rmse(data, speaker, ref_dir=None): 'Vertical and", "diffs = data[idx,2:4] - ref_dir dist = np.sqrt((diffs**2).sum(axis=2)) return np.sqrt((dist**2).mean()) def eg(data, speaker_positions=None):", "# use regular addition with corrected angles: idx = np.where(data[:, 1] == speaker)", "y, z def mean_dir(data, speaker): # use vector addition with uncorrected angles: #", "return np.sqrt((diffs**2).sum(axis=2)).mean() def rmse(data, speaker, ref_dir=None): 'Vertical and horizontal localization accuracies were quantified" ]
[]
[ "\"\"\"Builds the list of arguments for the shell. Args: shell_args: List of arguments", "iff the test succeeded, False otherwise. \"\"\" arguments = _build_shell_arguments(shell_args, apptest_url, apptest_args) command_line", "the test succeeded, False otherwise. \"\"\" arguments = _build_shell_arguments(shell_args, apptest_url, apptest_args) command_line =", "apptest_args: Parameters to be passed to the apptest app. Returns: Single list of", "\" \".join(apptest_args))) result.append(apptest_url) return result def run_apptest(shell, shell_args, apptest_url, apptest_args, timeout, output_test): \"\"\"Runs", "<filename>mojo/devtools/common/devtoolslib/apptest.py<gh_stars>1-10 # Copyright 2015 The Chromium Authors. All rights reserved. # Use of", "\" + \" \".join([\"%r\" % x for x in arguments]) _logger.debug(\"Starting: \" +", "the shell output and returning True iff the output indicates a successful run.", "or did_time_out or not output_test(output): print 'Failed test: %r' % command_line if exit_code:", "source code is governed by a BSD-style license that can be # found", "of this source code is governed by a BSD-style license that can be", "to be passed to the apptest app. output_test: Function accepting the shell output", "apptest app. output_test: Function accepting the shell output and returning True iff the", "%r' % command_line if exit_code: print ' due to shell exit code %d'", "apptest_url, apptest_args, timeout, output_test): \"\"\"Runs shell with the given arguments, retrieves the output", "+ command_line) start_time = time.time() (exit_code, output, did_time_out) = shell.run_and_get_output(arguments, timeout) run_time =", "Parameters to be passed to the apptest app. Returns: Single list of shell", "% command_line if exit_code: print ' due to shell exit code %d' %", "%d' % exit_code elif did_time_out: print ' due to exceeded timeout of %fs'", "Args: shell: Wrapper around concrete Mojo shell, implementing devtools Shell interface. 
shell_args: List", "of arguments for the shell. Args: shell_args: List of arguments for the shell", "the shell. Args: shell_args: List of arguments for the shell run. apptest_url: Url", "to the apptest app. output_test: Function accepting the shell output and returning True", "time.time() (exit_code, output, did_time_out) = shell.run_and_get_output(arguments, timeout) run_time = time.time() - start_time _logger.debug(\"Completed:", "Args: shell_args: List of arguments for the shell run. apptest_url: Url of the", "shell arguments. \"\"\" result = list(shell_args) if apptest_args: result.append(\"--args-for=%s %s\" % (apptest_url, \"", "' due to exceeded timeout of %fs' % timeout else: print ' due", "passed to the apptest app. output_test: Function accepting the shell output and returning", "passed to the apptest app. Returns: Single list of shell arguments. \"\"\" result", "return result def run_apptest(shell, shell_args, apptest_url, apptest_args, timeout, output_test): \"\"\"Runs shell with the", "\"mojo_shell \" + \" \".join([\"%r\" % x for x in arguments]) _logger.debug(\"Starting: \"", "\"\"\"Runs shell with the given arguments, retrieves the output and applies |output_test| to", "output_test): \"\"\"Runs shell with the given arguments, retrieves the output and applies |output_test|", "run was successful. Args: shell: Wrapper around concrete Mojo shell, implementing devtools Shell", "\".join(apptest_args))) result.append(apptest_url) return result def run_apptest(shell, shell_args, apptest_url, apptest_args, timeout, output_test): \"\"\"Runs shell", "than 3 second. if run_time >= 3: _logger.info(\"Test took %.3f seconds: %s\" %", "to exceeded timeout of %fs' % timeout else: print ' due to test", "test results' print 72 * '-' print output print 72 * '-' return", "print 72 * '-' print output print 72 * '-' return False return", "run. Returns: True iff the test succeeded, False otherwise. 
\"\"\" arguments = _build_shell_arguments(shell_args,", "Only log if it took more than 3 second. if run_time >= 3:", "% (apptest_url, \" \".join(apptest_args))) result.append(apptest_url) return result def run_apptest(shell, shell_args, apptest_url, apptest_args, timeout,", "did_time_out) = shell.run_and_get_output(arguments, timeout) run_time = time.time() - start_time _logger.debug(\"Completed: \" + command_line)", "it took more than 3 second. if run_time >= 3: _logger.info(\"Test took %.3f", "_logger.info(\"Test took %.3f seconds: %s\" % (run_time, command_line)) if exit_code or did_time_out or", "(run_time, command_line)) if exit_code or did_time_out or not output_test(output): print 'Failed test: %r'", "determine if the run was successful. Args: shell: Wrapper around concrete Mojo shell,", "exit_code elif did_time_out: print ' due to exceeded timeout of %fs' % timeout", "timeout else: print ' due to test results' print 72 * '-' print", "run. apptest_url: Url of the apptest app to run. apptest_args: Parameters to be", "Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source", "this source code is governed by a BSD-style license that can be #", "that can be # found in the LICENSE file. \"\"\"Apptest is a Mojo", "found in the LICENSE file. \"\"\"Apptest is a Mojo application that interacts with", "def _build_shell_arguments(shell_args, apptest_url, apptest_args): \"\"\"Builds the list of arguments for the shell. Args:", "indicates a successful run. Returns: True iff the test succeeded, False otherwise. \"\"\"", "tested. \"\"\" import logging import time _logger = logging.getLogger() def _build_shell_arguments(shell_args, apptest_url, apptest_args):", "apptest_args, timeout, output_test): \"\"\"Runs shell with the given arguments, retrieves the output and", "app. 
output_test: Function accepting the shell output and returning True iff the output", "time.time() - start_time _logger.debug(\"Completed: \" + command_line) # Only log if it took", "command_line if exit_code: print ' due to shell exit code %d' % exit_code", "log if it took more than 3 second. if run_time >= 3: _logger.info(\"Test", "+ \" \".join([\"%r\" % x for x in arguments]) _logger.debug(\"Starting: \" + command_line)", "results' print 72 * '-' print output print 72 * '-' return False", "run_time = time.time() - start_time _logger.debug(\"Completed: \" + command_line) # Only log if", "|output_test| to determine if the run was successful. Args: shell: Wrapper around concrete", "= \"mojo_shell \" + \" \".join([\"%r\" % x for x in arguments]) _logger.debug(\"Starting:", "\"\"\" arguments = _build_shell_arguments(shell_args, apptest_url, apptest_args) command_line = \"mojo_shell \" + \" \".join([\"%r\"", "_logger.debug(\"Starting: \" + command_line) start_time = time.time() (exit_code, output, did_time_out) = shell.run_and_get_output(arguments, timeout)", "command_line) start_time = time.time() (exit_code, output, did_time_out) = shell.run_and_get_output(arguments, timeout) run_time = time.time()", "apptest_url: Url of the apptest app to run. apptest_args: Parameters to be passed", "\" + command_line) # Only log if it took more than 3 second.", "arguments for the shell run. apptest_url: Url of the apptest app to run.", "= time.time() (exit_code, output, did_time_out) = shell.run_and_get_output(arguments, timeout) run_time = time.time() - start_time", "shell.run_and_get_output(arguments, timeout) run_time = time.time() - start_time _logger.debug(\"Completed: \" + command_line) # Only", "if exit_code or did_time_out or not output_test(output): print 'Failed test: %r' % command_line", "or not output_test(output): print 'Failed test: %r' % command_line if exit_code: print '", "2015 The Chromium Authors. All rights reserved. 
# Use of this source code", "command_line) # Only log if it took more than 3 second. if run_time", "output indicates a successful run. Returns: True iff the test succeeded, False otherwise.", "concrete Mojo shell, implementing devtools Shell interface. shell_args: List of arguments for the", "the output indicates a successful run. Returns: True iff the test succeeded, False", "of the apptest app to run. apptest_args: Parameters to be passed to the", "print ' due to test results' print 72 * '-' print output print", "Wrapper around concrete Mojo shell, implementing devtools Shell interface. shell_args: List of arguments", "True iff the output indicates a successful run. Returns: True iff the test", "app being tested. \"\"\" import logging import time _logger = logging.getLogger() def _build_shell_arguments(shell_args,", "the shell run. apptest_url: Url of the apptest app to run. apptest_args: Parameters", "import time _logger = logging.getLogger() def _build_shell_arguments(shell_args, apptest_url, apptest_args): \"\"\"Builds the list of", "with another Mojo application and verifies assumptions about behavior of the app being", "more than 3 second. if run_time >= 3: _logger.info(\"Test took %.3f seconds: %s\"", "' due to shell exit code %d' % exit_code elif did_time_out: print '", "(apptest_url, \" \".join(apptest_args))) result.append(apptest_url) return result def run_apptest(shell, shell_args, apptest_url, apptest_args, timeout, output_test):", "app to run. apptest_args: Parameters to be passed to the apptest app. 
output_test:", "for x in arguments]) _logger.debug(\"Starting: \" + command_line) start_time = time.time() (exit_code, output,", "to shell exit code %d' % exit_code elif did_time_out: print ' due to", "output, did_time_out) = shell.run_and_get_output(arguments, timeout) run_time = time.time() - start_time _logger.debug(\"Completed: \" +", "timeout, output_test): \"\"\"Runs shell with the given arguments, retrieves the output and applies", "devtools Shell interface. shell_args: List of arguments for the shell run. apptest_url: Url", "the apptest app to run. apptest_args: Parameters to be passed to the apptest", "around concrete Mojo shell, implementing devtools Shell interface. shell_args: List of arguments for", "another Mojo application and verifies assumptions about behavior of the app being tested.", "of shell arguments. \"\"\" result = list(shell_args) if apptest_args: result.append(\"--args-for=%s %s\" % (apptest_url,", "logging.getLogger() def _build_shell_arguments(shell_args, apptest_url, apptest_args): \"\"\"Builds the list of arguments for the shell.", "apptest app. Returns: Single list of shell arguments. \"\"\" result = list(shell_args) if", "code %d' % exit_code elif did_time_out: print ' due to exceeded timeout of", "the app being tested. \"\"\" import logging import time _logger = logging.getLogger() def", "the run was successful. Args: shell: Wrapper around concrete Mojo shell, implementing devtools", "timeout) run_time = time.time() - start_time _logger.debug(\"Completed: \" + command_line) # Only log", "test succeeded, False otherwise. \"\"\" arguments = _build_shell_arguments(shell_args, apptest_url, apptest_args) command_line = \"mojo_shell", "apptest_args: Parameters to be passed to the apptest app. output_test: Function accepting the", "BSD-style license that can be # found in the LICENSE file. \"\"\"Apptest is", "Mojo application that interacts with another Mojo application and verifies assumptions about behavior", "of the app being tested. 
\"\"\" import logging import time _logger = logging.getLogger()", "to be passed to the apptest app. Returns: Single list of shell arguments.", "Authors. All rights reserved. # Use of this source code is governed by", "that interacts with another Mojo application and verifies assumptions about behavior of the", "Returns: Single list of shell arguments. \"\"\" result = list(shell_args) if apptest_args: result.append(\"--args-for=%s", "the output and applies |output_test| to determine if the run was successful. Args:", "% (run_time, command_line)) if exit_code or did_time_out or not output_test(output): print 'Failed test:", "\"\"\"Apptest is a Mojo application that interacts with another Mojo application and verifies", "(exit_code, output, did_time_out) = shell.run_and_get_output(arguments, timeout) run_time = time.time() - start_time _logger.debug(\"Completed: \"", "for the shell. Args: shell_args: List of arguments for the shell run. apptest_url:", "3 second. if run_time >= 3: _logger.info(\"Test took %.3f seconds: %s\" % (run_time,", "to the apptest app. Returns: Single list of shell arguments. \"\"\" result =", "shell exit code %d' % exit_code elif did_time_out: print ' due to exceeded", "= time.time() - start_time _logger.debug(\"Completed: \" + command_line) # Only log if it", "The Chromium Authors. All rights reserved. # Use of this source code is", "Use of this source code is governed by a BSD-style license that can", "be passed to the apptest app. output_test: Function accepting the shell output and", "shell with the given arguments, retrieves the output and applies |output_test| to determine", "result.append(apptest_url) return result def run_apptest(shell, shell_args, apptest_url, apptest_args, timeout, output_test): \"\"\"Runs shell with", "retrieves the output and applies |output_test| to determine if the run was successful.", "Mojo shell, implementing devtools Shell interface. 
shell_args: List of arguments for the shell", "due to exceeded timeout of %fs' % timeout else: print ' due to", "shell. Args: shell_args: List of arguments for the shell run. apptest_url: Url of", "of %fs' % timeout else: print ' due to test results' print 72", "result = list(shell_args) if apptest_args: result.append(\"--args-for=%s %s\" % (apptest_url, \" \".join(apptest_args))) result.append(apptest_url) return", "was successful. Args: shell: Wrapper around concrete Mojo shell, implementing devtools Shell interface.", "is a Mojo application that interacts with another Mojo application and verifies assumptions", "apptest app to run. apptest_args: Parameters to be passed to the apptest app.", ">= 3: _logger.info(\"Test took %.3f seconds: %s\" % (run_time, command_line)) if exit_code or", "did_time_out or not output_test(output): print 'Failed test: %r' % command_line if exit_code: print", "if exit_code: print ' due to shell exit code %d' % exit_code elif", "Url of the apptest app to run. apptest_args: Parameters to be passed to", "and applies |output_test| to determine if the run was successful. Args: shell: Wrapper", "a successful run. Returns: True iff the test succeeded, False otherwise. \"\"\" arguments", "due to shell exit code %d' % exit_code elif did_time_out: print ' due", "a BSD-style license that can be # found in the LICENSE file. \"\"\"Apptest", "if it took more than 3 second. if run_time >= 3: _logger.info(\"Test took", "exit_code: print ' due to shell exit code %d' % exit_code elif did_time_out:", "list(shell_args) if apptest_args: result.append(\"--args-for=%s %s\" % (apptest_url, \" \".join(apptest_args))) result.append(apptest_url) return result def", "Parameters to be passed to the apptest app. 
output_test: Function accepting the shell", "apptest_args) command_line = \"mojo_shell \" + \" \".join([\"%r\" % x for x in", "else: print ' due to test results' print 72 * '-' print output", "apptest_args): \"\"\"Builds the list of arguments for the shell. Args: shell_args: List of", "arguments, retrieves the output and applies |output_test| to determine if the run was", "of arguments for the shell run. apptest_url: Url of the apptest app to", "Function accepting the shell output and returning True iff the output indicates a", "run. apptest_args: Parameters to be passed to the apptest app. output_test: Function accepting", "code is governed by a BSD-style license that can be # found in", "returning True iff the output indicates a successful run. Returns: True iff the", "arguments = _build_shell_arguments(shell_args, apptest_url, apptest_args) command_line = \"mojo_shell \" + \" \".join([\"%r\" %", "took %.3f seconds: %s\" % (run_time, command_line)) if exit_code or did_time_out or not", "print ' due to shell exit code %d' % exit_code elif did_time_out: print", "the apptest app. Returns: Single list of shell arguments. \"\"\" result = list(shell_args)", "% x for x in arguments]) _logger.debug(\"Starting: \" + command_line) start_time = time.time()", "rights reserved. # Use of this source code is governed by a BSD-style", "%s\" % (run_time, command_line)) if exit_code or did_time_out or not output_test(output): print 'Failed", "True iff the test succeeded, False otherwise. \"\"\" arguments = _build_shell_arguments(shell_args, apptest_url, apptest_args)", "\"\"\" result = list(shell_args) if apptest_args: result.append(\"--args-for=%s %s\" % (apptest_url, \" \".join(apptest_args))) result.append(apptest_url)", "seconds: %s\" % (run_time, command_line)) if exit_code or did_time_out or not output_test(output): print", "_build_shell_arguments(shell_args, apptest_url, apptest_args): \"\"\"Builds the list of arguments for the shell. 
Args: shell_args:", "shell_args: List of arguments for the shell run. apptest_url: Url of the apptest", "- start_time _logger.debug(\"Completed: \" + command_line) # Only log if it took more", "%.3f seconds: %s\" % (run_time, command_line)) if exit_code or did_time_out or not output_test(output):", "to determine if the run was successful. Args: shell: Wrapper around concrete Mojo", "verifies assumptions about behavior of the app being tested. \"\"\" import logging import", "_logger = logging.getLogger() def _build_shell_arguments(shell_args, apptest_url, apptest_args): \"\"\"Builds the list of arguments for", "can be # found in the LICENSE file. \"\"\"Apptest is a Mojo application", "took more than 3 second. if run_time >= 3: _logger.info(\"Test took %.3f seconds:", "%fs' % timeout else: print ' due to test results' print 72 *", "a Mojo application that interacts with another Mojo application and verifies assumptions about", "and verifies assumptions about behavior of the app being tested. \"\"\" import logging", "list of arguments for the shell. Args: shell_args: List of arguments for the", "\" \".join([\"%r\" % x for x in arguments]) _logger.debug(\"Starting: \" + command_line) start_time", "All rights reserved. # Use of this source code is governed by a", "if the run was successful. Args: shell: Wrapper around concrete Mojo shell, implementing", "application and verifies assumptions about behavior of the app being tested. \"\"\" import", "run_time >= 3: _logger.info(\"Test took %.3f seconds: %s\" % (run_time, command_line)) if exit_code", "elif did_time_out: print ' due to exceeded timeout of %fs' % timeout else:", "by a BSD-style license that can be # found in the LICENSE file.", "successful. Args: shell: Wrapper around concrete Mojo shell, implementing devtools Shell interface. shell_args:", "shell run. apptest_url: Url of the apptest app to run. apptest_args: Parameters to", "license that can be # found in the LICENSE file. 
\"\"\"Apptest is a", "import logging import time _logger = logging.getLogger() def _build_shell_arguments(shell_args, apptest_url, apptest_args): \"\"\"Builds the", "arguments for the shell. Args: shell_args: List of arguments for the shell run.", "the LICENSE file. \"\"\"Apptest is a Mojo application that interacts with another Mojo", "Returns: True iff the test succeeded, False otherwise. \"\"\" arguments = _build_shell_arguments(shell_args, apptest_url,", "x for x in arguments]) _logger.debug(\"Starting: \" + command_line) start_time = time.time() (exit_code,", "is governed by a BSD-style license that can be # found in the", "the given arguments, retrieves the output and applies |output_test| to determine if the", "= logging.getLogger() def _build_shell_arguments(shell_args, apptest_url, apptest_args): \"\"\"Builds the list of arguments for the", "succeeded, False otherwise. \"\"\" arguments = _build_shell_arguments(shell_args, apptest_url, apptest_args) command_line = \"mojo_shell \"", "'Failed test: %r' % command_line if exit_code: print ' due to shell exit", "% timeout else: print ' due to test results' print 72 * '-'", "apptest_url, apptest_args) command_line = \"mojo_shell \" + \" \".join([\"%r\" % x for x", "for the shell run. apptest_url: Url of the apptest app to run. apptest_args:", "shell_args, apptest_url, apptest_args, timeout, output_test): \"\"\"Runs shell with the given arguments, retrieves the", "assumptions about behavior of the app being tested. \"\"\" import logging import time", "# found in the LICENSE file. \"\"\"Apptest is a Mojo application that interacts", "logging import time _logger = logging.getLogger() def _build_shell_arguments(shell_args, apptest_url, apptest_args): \"\"\"Builds the list", "file. \"\"\"Apptest is a Mojo application that interacts with another Mojo application and", "governed by a BSD-style license that can be # found in the LICENSE", "in the LICENSE file. 
\"\"\"Apptest is a Mojo application that interacts with another", "Mojo application and verifies assumptions about behavior of the app being tested. \"\"\"", "behavior of the app being tested. \"\"\" import logging import time _logger =", "False otherwise. \"\"\" arguments = _build_shell_arguments(shell_args, apptest_url, apptest_args) command_line = \"mojo_shell \" +", "due to test results' print 72 * '-' print output print 72 *", "command_line = \"mojo_shell \" + \" \".join([\"%r\" % x for x in arguments])", "implementing devtools Shell interface. shell_args: List of arguments for the shell run. apptest_url:", "x in arguments]) _logger.debug(\"Starting: \" + command_line) start_time = time.time() (exit_code, output, did_time_out)", "timeout of %fs' % timeout else: print ' due to test results' print", "list of shell arguments. \"\"\" result = list(shell_args) if apptest_args: result.append(\"--args-for=%s %s\" %", "72 * '-' print output print 72 * '-' return False return True", "def run_apptest(shell, shell_args, apptest_url, apptest_args, timeout, output_test): \"\"\"Runs shell with the given arguments,", "given arguments, retrieves the output and applies |output_test| to determine if the run", "reserved. 
# Use of this source code is governed by a BSD-style license", "exit code %d' % exit_code elif did_time_out: print ' due to exceeded timeout", "print ' due to exceeded timeout of %fs' % timeout else: print '", "\".join([\"%r\" % x for x in arguments]) _logger.debug(\"Starting: \" + command_line) start_time =", "result def run_apptest(shell, shell_args, apptest_url, apptest_args, timeout, output_test): \"\"\"Runs shell with the given", "= shell.run_and_get_output(arguments, timeout) run_time = time.time() - start_time _logger.debug(\"Completed: \" + command_line) #", "# Use of this source code is governed by a BSD-style license that", "% exit_code elif did_time_out: print ' due to exceeded timeout of %fs' %", "time _logger = logging.getLogger() def _build_shell_arguments(shell_args, apptest_url, apptest_args): \"\"\"Builds the list of arguments", "output and returning True iff the output indicates a successful run. Returns: True", "to run. apptest_args: Parameters to be passed to the apptest app. Returns: Single", "test: %r' % command_line if exit_code: print ' due to shell exit code", "run. apptest_args: Parameters to be passed to the apptest app. Returns: Single list", "be passed to the apptest app. Returns: Single list of shell arguments. \"\"\"", "being tested. \"\"\" import logging import time _logger = logging.getLogger() def _build_shell_arguments(shell_args, apptest_url,", "if apptest_args: result.append(\"--args-for=%s %s\" % (apptest_url, \" \".join(apptest_args))) result.append(apptest_url) return result def run_apptest(shell,", "applies |output_test| to determine if the run was successful. Args: shell: Wrapper around", "did_time_out: print ' due to exceeded timeout of %fs' % timeout else: print", "' due to test results' print 72 * '-' print output print 72", "and returning True iff the output indicates a successful run. 
Returns: True iff", "print 'Failed test: %r' % command_line if exit_code: print ' due to shell", "start_time = time.time() (exit_code, output, did_time_out) = shell.run_and_get_output(arguments, timeout) run_time = time.time() -", "if run_time >= 3: _logger.info(\"Test took %.3f seconds: %s\" % (run_time, command_line)) if", "Chromium Authors. All rights reserved. # Use of this source code is governed", "= list(shell_args) if apptest_args: result.append(\"--args-for=%s %s\" % (apptest_url, \" \".join(apptest_args))) result.append(apptest_url) return result", "to run. apptest_args: Parameters to be passed to the apptest app. output_test: Function", "Single list of shell arguments. \"\"\" result = list(shell_args) if apptest_args: result.append(\"--args-for=%s %s\"", "in arguments]) _logger.debug(\"Starting: \" + command_line) start_time = time.time() (exit_code, output, did_time_out) =", "arguments]) _logger.debug(\"Starting: \" + command_line) start_time = time.time() (exit_code, output, did_time_out) = shell.run_and_get_output(arguments,", "shell: Wrapper around concrete Mojo shell, implementing devtools Shell interface. shell_args: List of", "otherwise. \"\"\" arguments = _build_shell_arguments(shell_args, apptest_url, apptest_args) command_line = \"mojo_shell \" + \"", "interface. shell_args: List of arguments for the shell run. apptest_url: Url of the", "successful run. Returns: True iff the test succeeded, False otherwise. \"\"\" arguments =", "app to run. apptest_args: Parameters to be passed to the apptest app. Returns:", "run_apptest(shell, shell_args, apptest_url, apptest_args, timeout, output_test): \"\"\"Runs shell with the given arguments, retrieves", "be # found in the LICENSE file. \"\"\"Apptest is a Mojo application that", "LICENSE file. 
\"\"\"Apptest is a Mojo application that interacts with another Mojo application", "application that interacts with another Mojo application and verifies assumptions about behavior of", "apptest_args: result.append(\"--args-for=%s %s\" % (apptest_url, \" \".join(apptest_args))) result.append(apptest_url) return result def run_apptest(shell, shell_args,", "\"\"\" import logging import time _logger = logging.getLogger() def _build_shell_arguments(shell_args, apptest_url, apptest_args): \"\"\"Builds", "about behavior of the app being tested. \"\"\" import logging import time _logger", "# Only log if it took more than 3 second. if run_time >=", "not output_test(output): print 'Failed test: %r' % command_line if exit_code: print ' due", "the apptest app. output_test: Function accepting the shell output and returning True iff", "\" + command_line) start_time = time.time() (exit_code, output, did_time_out) = shell.run_and_get_output(arguments, timeout) run_time", "start_time _logger.debug(\"Completed: \" + command_line) # Only log if it took more than", "with the given arguments, retrieves the output and applies |output_test| to determine if", "Shell interface. shell_args: List of arguments for the shell run. apptest_url: Url of", "output and applies |output_test| to determine if the run was successful. Args: shell:", "command_line)) if exit_code or did_time_out or not output_test(output): print 'Failed test: %r' %", "arguments. \"\"\" result = list(shell_args) if apptest_args: result.append(\"--args-for=%s %s\" % (apptest_url, \" \".join(apptest_args)))", "result.append(\"--args-for=%s %s\" % (apptest_url, \" \".join(apptest_args))) result.append(apptest_url) return result def run_apptest(shell, shell_args, apptest_url,", "output_test: Function accepting the shell output and returning True iff the output indicates", "iff the output indicates a successful run. Returns: True iff the test succeeded,", "app. Returns: Single list of shell arguments. 
\"\"\" result = list(shell_args) if apptest_args:", "%s\" % (apptest_url, \" \".join(apptest_args))) result.append(apptest_url) return result def run_apptest(shell, shell_args, apptest_url, apptest_args,", "shell, implementing devtools Shell interface. shell_args: List of arguments for the shell run.", "accepting the shell output and returning True iff the output indicates a successful", "second. if run_time >= 3: _logger.info(\"Test took %.3f seconds: %s\" % (run_time, command_line))", "_logger.debug(\"Completed: \" + command_line) # Only log if it took more than 3", "apptest_url, apptest_args): \"\"\"Builds the list of arguments for the shell. Args: shell_args: List", "exit_code or did_time_out or not output_test(output): print 'Failed test: %r' % command_line if", "_build_shell_arguments(shell_args, apptest_url, apptest_args) command_line = \"mojo_shell \" + \" \".join([\"%r\" % x for", "output_test(output): print 'Failed test: %r' % command_line if exit_code: print ' due to", "List of arguments for the shell run. apptest_url: Url of the apptest app", "# Copyright 2015 The Chromium Authors. All rights reserved. # Use of this", "to test results' print 72 * '-' print output print 72 * '-'", "+ command_line) # Only log if it took more than 3 second. if", "= _build_shell_arguments(shell_args, apptest_url, apptest_args) command_line = \"mojo_shell \" + \" \".join([\"%r\" % x", "3: _logger.info(\"Test took %.3f seconds: %s\" % (run_time, command_line)) if exit_code or did_time_out", "the list of arguments for the shell. Args: shell_args: List of arguments for", "exceeded timeout of %fs' % timeout else: print ' due to test results'", "shell output and returning True iff the output indicates a successful run. Returns:", "interacts with another Mojo application and verifies assumptions about behavior of the app" ]
[ "int) -> None: self.logger = logging.getLogger(self.__class__.__name__) self.logger.setLevel(log_level) @property def log_level(self) -> int: self.logger.level", "abc import ABC class LoggingBase(ABC): def __init__(self, log_level: int) -> None: self.logger =", "ABC class LoggingBase(ABC): def __init__(self, log_level: int) -> None: self.logger = logging.getLogger(self.__class__.__name__) self.logger.setLevel(log_level)", "def __init__(self, log_level: int) -> None: self.logger = logging.getLogger(self.__class__.__name__) self.logger.setLevel(log_level) @property def log_level(self)", "from abc import ABC class LoggingBase(ABC): def __init__(self, log_level: int) -> None: self.logger", "import logging from abc import ABC class LoggingBase(ABC): def __init__(self, log_level: int) ->", "logging from abc import ABC class LoggingBase(ABC): def __init__(self, log_level: int) -> None:", "log_level: int) -> None: self.logger = logging.getLogger(self.__class__.__name__) self.logger.setLevel(log_level) @property def log_level(self) -> int:", "__init__(self, log_level: int) -> None: self.logger = logging.getLogger(self.__class__.__name__) self.logger.setLevel(log_level) @property def log_level(self) ->", "import ABC class LoggingBase(ABC): def __init__(self, log_level: int) -> None: self.logger = logging.getLogger(self.__class__.__name__)", "LoggingBase(ABC): def __init__(self, log_level: int) -> None: self.logger = logging.getLogger(self.__class__.__name__) self.logger.setLevel(log_level) @property def", "class LoggingBase(ABC): def __init__(self, log_level: int) -> None: self.logger = logging.getLogger(self.__class__.__name__) self.logger.setLevel(log_level) @property" ]
[ "if not found: tracking_id = None event_data = {'url': request.path_info, 'method': request.method} if", "\"good\" return None def process_response(self, request, response): \"\"\" Only record when we return", "request, response): \"\"\" Only record when we return HTML pages. Set a cookie", "found = True break if not found: tracking_id = None event_data = {'url':", "UserTrackingMiddleware(object): def process_request(self, request): #request.session[\"test\"] = \"good\" return None def process_response(self, request, response):", "javascript callback behavior to check if the user has disabled cookies response.set_cookie('user_tracking_verify', tracking_id)", "not found: tracking_id = None event_data = {'url': request.path_info, 'method': request.method} if 'user_tracking_id'", "when we return HTML pages. Set a cookie if not set \"\"\" if", "for url in urls: if url_request.find(url) >= 0: found = True break if", "= None event_data = {'url': request.path_info, 'method': request.method} if 'user_tracking_id' not in request.COOKIES:", "user has disabled cookies response.set_cookie('user_tracking_verify', tracking_id) else: tracking_id = request.COOKIES['user_tracking_id'] register_event(tracking_id=tracking_id, event_name='server_middleware_page_view',event_data=event_data, request=request)", "= True break if not found: tracking_id = None event_data = {'url': request.path_info,", "request.method} if 'user_tracking_id' not in request.COOKIES: tracking_id = generate_new_tracking_key() response.set_cookie('user_tracking_id', tracking_id) register_event(tracking_id=tracking_id, event_name='server_middleware_set_cookie',", "tracking_id = None event_data = {'url': request.path_info, 'method': request.method} if 'user_tracking_id' not in", "to check if the user has disabled cookies response.set_cookie('user_tracking_verify', tracking_id) else: tracking_id =", "set \"\"\" if 'text/html' in response.get('Content-Type', ''): content = getattr(response, 'content', '') if", 
"django.conf import settings USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE = getattr(settings, \"USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE\", False) class UserTrackingMiddleware(object): def process_request(self, request):", "tracking import generate_new_tracking_key, register_event from django.core.urlresolvers import reverse from django.conf import settings USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE", "from django.conf import settings USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE = getattr(settings, \"USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE\", False) class UserTrackingMiddleware(object): def process_request(self,", "behavior to check if the user has disabled cookies response.set_cookie('user_tracking_verify', tracking_id) else: tracking_id", "in urls: if url_request.find(url) >= 0: found = True break if not found:", "urls = [reverse('user_tracking_register_event'), reverse('user_tracking_verify')] found = False for url in urls: if url_request.find(url)", "\"\"\" Only record when we return HTML pages. Set a cookie if not", "= generate_new_tracking_key() response.set_cookie('user_tracking_id', tracking_id) register_event(tracking_id=tracking_id, event_name='server_middleware_set_cookie', request=request) #set javascript callback behavior to check", "record when we return HTML pages. Set a cookie if not set \"\"\"", "we return HTML pages. Set a cookie if not set \"\"\" if 'text/html'", "'text/html' in response.get('Content-Type', ''): content = getattr(response, 'content', '') if USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE or content.find(\"<body\")", "= getattr(response, 'content', '') if USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE or content.find(\"<body\") >= 0: url_request = request.path", "generate_new_tracking_key() response.set_cookie('user_tracking_id', tracking_id) register_event(tracking_id=tracking_id, event_name='server_middleware_set_cookie', request=request) #set javascript callback behavior to check if", "return HTML pages. 
Set a cookie if not set \"\"\" if 'text/html' in", "register_event from django.core.urlresolvers import reverse from django.conf import settings USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE = getattr(settings, \"USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE\",", "response.set_cookie('user_tracking_id', tracking_id) register_event(tracking_id=tracking_id, event_name='server_middleware_set_cookie', request=request) #set javascript callback behavior to check if the", "'content', '') if USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE or content.find(\"<body\") >= 0: url_request = request.path urls =", "'user_tracking_id' not in request.COOKIES: tracking_id = generate_new_tracking_key() response.set_cookie('user_tracking_id', tracking_id) register_event(tracking_id=tracking_id, event_name='server_middleware_set_cookie', request=request) #set", "a cookie if not set \"\"\" if 'text/html' in response.get('Content-Type', ''): content =", "def process_response(self, request, response): \"\"\" Only record when we return HTML pages. Set", "None def process_response(self, request, response): \"\"\" Only record when we return HTML pages.", "process_request(self, request): #request.session[\"test\"] = \"good\" return None def process_response(self, request, response): \"\"\" Only", "process_response(self, request, response): \"\"\" Only record when we return HTML pages. 
Set a", "in response.get('Content-Type', ''): content = getattr(response, 'content', '') if USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE or content.find(\"<body\") >=", "if USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE or content.find(\"<body\") >= 0: url_request = request.path urls = [reverse('user_tracking_register_event'), reverse('user_tracking_verify')]", "disabled cookies response.set_cookie('user_tracking_verify', tracking_id) else: tracking_id = request.COOKIES['user_tracking_id'] register_event(tracking_id=tracking_id, event_name='server_middleware_page_view',event_data=event_data, request=request) return response", "found: tracking_id = None event_data = {'url': request.path_info, 'method': request.method} if 'user_tracking_id' not", "reverse from django.conf import settings USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE = getattr(settings, \"USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE\", False) class UserTrackingMiddleware(object): def", "reverse('user_tracking_verify')] found = False for url in urls: if url_request.find(url) >= 0: found", "Set a cookie if not set \"\"\" if 'text/html' in response.get('Content-Type', ''): content", "= getattr(settings, \"USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE\", False) class UserTrackingMiddleware(object): def process_request(self, request): #request.session[\"test\"] = \"good\" return", "not in request.COOKIES: tracking_id = generate_new_tracking_key() response.set_cookie('user_tracking_id', tracking_id) register_event(tracking_id=tracking_id, event_name='server_middleware_set_cookie', request=request) #set javascript", "USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE = getattr(settings, \"USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE\", False) class UserTrackingMiddleware(object): def process_request(self, request): #request.session[\"test\"] = \"good\"", "import settings USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE = getattr(settings, \"USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE\", False) class UserTrackingMiddleware(object): def process_request(self, 
request): #request.session[\"test\"]", "if 'text/html' in response.get('Content-Type', ''): content = getattr(response, 'content', '') if USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE or", "in request.COOKIES: tracking_id = generate_new_tracking_key() response.set_cookie('user_tracking_id', tracking_id) register_event(tracking_id=tracking_id, event_name='server_middleware_set_cookie', request=request) #set javascript callback", "if the user has disabled cookies response.set_cookie('user_tracking_verify', tracking_id) else: tracking_id = request.COOKIES['user_tracking_id'] register_event(tracking_id=tracking_id,", "content = getattr(response, 'content', '') if USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE or content.find(\"<body\") >= 0: url_request =", "cookie if not set \"\"\" if 'text/html' in response.get('Content-Type', ''): content = getattr(response,", "def process_request(self, request): #request.session[\"test\"] = \"good\" return None def process_response(self, request, response): \"\"\"", "''): content = getattr(response, 'content', '') if USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE or content.find(\"<body\") >= 0: url_request", "url_request = request.path urls = [reverse('user_tracking_register_event'), reverse('user_tracking_verify')] found = False for url in", "= [reverse('user_tracking_register_event'), reverse('user_tracking_verify')] found = False for url in urls: if url_request.find(url) >=", "content.find(\"<body\") >= 0: url_request = request.path urls = [reverse('user_tracking_register_event'), reverse('user_tracking_verify')] found = False", "has disabled cookies response.set_cookie('user_tracking_verify', tracking_id) else: tracking_id = request.COOKIES['user_tracking_id'] register_event(tracking_id=tracking_id, event_name='server_middleware_page_view',event_data=event_data, request=request) return", "generate_new_tracking_key, register_event from django.core.urlresolvers import reverse from django.conf import settings 
USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE = getattr(settings,", "tracking_id) register_event(tracking_id=tracking_id, event_name='server_middleware_set_cookie', request=request) #set javascript callback behavior to check if the user", "return None def process_response(self, request, response): \"\"\" Only record when we return HTML", "if url_request.find(url) >= 0: found = True break if not found: tracking_id =", "request.path_info, 'method': request.method} if 'user_tracking_id' not in request.COOKIES: tracking_id = generate_new_tracking_key() response.set_cookie('user_tracking_id', tracking_id)", "0: found = True break if not found: tracking_id = None event_data =", "= {'url': request.path_info, 'method': request.method} if 'user_tracking_id' not in request.COOKIES: tracking_id = generate_new_tracking_key()", "'') if USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE or content.find(\"<body\") >= 0: url_request = request.path urls = [reverse('user_tracking_register_event'),", "if 'user_tracking_id' not in request.COOKIES: tracking_id = generate_new_tracking_key() response.set_cookie('user_tracking_id', tracking_id) register_event(tracking_id=tracking_id, event_name='server_middleware_set_cookie', request=request)", "response.get('Content-Type', ''): content = getattr(response, 'content', '') if USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE or content.find(\"<body\") >= 0:", "request=request) #set javascript callback behavior to check if the user has disabled cookies", "import reverse from django.conf import settings USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE = getattr(settings, \"USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE\", False) class UserTrackingMiddleware(object):", "\"\"\" if 'text/html' in response.get('Content-Type', ''): content = getattr(response, 'content', '') if USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE", "False) class UserTrackingMiddleware(object): def process_request(self, request): #request.session[\"test\"] = \"good\" return None def process_response(self,", ">= 0: 
url_request = request.path urls = [reverse('user_tracking_register_event'), reverse('user_tracking_verify')] found = False for", "url_request.find(url) >= 0: found = True break if not found: tracking_id = None", "break if not found: tracking_id = None event_data = {'url': request.path_info, 'method': request.method}", "url in urls: if url_request.find(url) >= 0: found = True break if not", "USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE or content.find(\"<body\") >= 0: url_request = request.path urls = [reverse('user_tracking_register_event'), reverse('user_tracking_verify')] found", "settings USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE = getattr(settings, \"USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE\", False) class UserTrackingMiddleware(object): def process_request(self, request): #request.session[\"test\"] =", "event_name='server_middleware_set_cookie', request=request) #set javascript callback behavior to check if the user has disabled", "from tracking import generate_new_tracking_key, register_event from django.core.urlresolvers import reverse from django.conf import settings", "= \"good\" return None def process_response(self, request, response): \"\"\" Only record when we", "None event_data = {'url': request.path_info, 'method': request.method} if 'user_tracking_id' not in request.COOKIES: tracking_id", "register_event(tracking_id=tracking_id, event_name='server_middleware_set_cookie', request=request) #set javascript callback behavior to check if the user has", "if not set \"\"\" if 'text/html' in response.get('Content-Type', ''): content = getattr(response, 'content',", "callback behavior to check if the user has disabled cookies response.set_cookie('user_tracking_verify', tracking_id) else:", "or content.find(\"<body\") >= 0: url_request = request.path urls = [reverse('user_tracking_register_event'), reverse('user_tracking_verify')] found =", "[reverse('user_tracking_register_event'), reverse('user_tracking_verify')] found = False for url in urls: if 
url_request.find(url) >= 0:", "event_data = {'url': request.path_info, 'method': request.method} if 'user_tracking_id' not in request.COOKIES: tracking_id =", "the user has disabled cookies response.set_cookie('user_tracking_verify', tracking_id) else: tracking_id = request.COOKIES['user_tracking_id'] register_event(tracking_id=tracking_id, event_name='server_middleware_page_view',event_data=event_data,", "request.COOKIES: tracking_id = generate_new_tracking_key() response.set_cookie('user_tracking_id', tracking_id) register_event(tracking_id=tracking_id, event_name='server_middleware_set_cookie', request=request) #set javascript callback behavior", "= request.path urls = [reverse('user_tracking_register_event'), reverse('user_tracking_verify')] found = False for url in urls:", "= False for url in urls: if url_request.find(url) >= 0: found = True", "class UserTrackingMiddleware(object): def process_request(self, request): #request.session[\"test\"] = \"good\" return None def process_response(self, request,", "Only record when we return HTML pages. 
Set a cookie if not set", "not set \"\"\" if 'text/html' in response.get('Content-Type', ''): content = getattr(response, 'content', '')", ">= 0: found = True break if not found: tracking_id = None event_data", "'method': request.method} if 'user_tracking_id' not in request.COOKIES: tracking_id = generate_new_tracking_key() response.set_cookie('user_tracking_id', tracking_id) register_event(tracking_id=tracking_id,", "getattr(settings, \"USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE\", False) class UserTrackingMiddleware(object): def process_request(self, request): #request.session[\"test\"] = \"good\" return None", "0: url_request = request.path urls = [reverse('user_tracking_register_event'), reverse('user_tracking_verify')] found = False for url", "check if the user has disabled cookies response.set_cookie('user_tracking_verify', tracking_id) else: tracking_id = request.COOKIES['user_tracking_id']", "import generate_new_tracking_key, register_event from django.core.urlresolvers import reverse from django.conf import settings USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE =", "found = False for url in urls: if url_request.find(url) >= 0: found =", "from django.core.urlresolvers import reverse from django.conf import settings USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE = getattr(settings, \"USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE\", False)", "False for url in urls: if url_request.find(url) >= 0: found = True break", "{'url': request.path_info, 'method': request.method} if 'user_tracking_id' not in request.COOKIES: tracking_id = generate_new_tracking_key() response.set_cookie('user_tracking_id',", "urls: if url_request.find(url) >= 0: found = True break if not found: tracking_id", "True break if not found: tracking_id = None event_data = {'url': request.path_info, 'method':", "request.path urls = [reverse('user_tracking_register_event'), reverse('user_tracking_verify')] found = False for url in urls: if", "pages. 
Set a cookie if not set \"\"\" if 'text/html' in response.get('Content-Type', ''):", "request): #request.session[\"test\"] = \"good\" return None def process_response(self, request, response): \"\"\" Only record", "django.core.urlresolvers import reverse from django.conf import settings USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE = getattr(settings, \"USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE\", False) class", "tracking_id = generate_new_tracking_key() response.set_cookie('user_tracking_id', tracking_id) register_event(tracking_id=tracking_id, event_name='server_middleware_set_cookie', request=request) #set javascript callback behavior to", "getattr(response, 'content', '') if USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE or content.find(\"<body\") >= 0: url_request = request.path urls", "#request.session[\"test\"] = \"good\" return None def process_response(self, request, response): \"\"\" Only record when", "\"USER_TRACKING_LOG_HTML_FRAGMENT_RESPONSE\", False) class UserTrackingMiddleware(object): def process_request(self, request): #request.session[\"test\"] = \"good\" return None def", "#set javascript callback behavior to check if the user has disabled cookies response.set_cookie('user_tracking_verify',", "response): \"\"\" Only record when we return HTML pages. Set a cookie if", "HTML pages. Set a cookie if not set \"\"\" if 'text/html' in response.get('Content-Type'," ]
[ "class workspaces(): @staticmethod def _cmd(*args): return subprocess.Popen(args, stdout=subprocess.PIPE).stdout.read().decode(\"utf-8\") @staticmethod def get_display_size(): size =", "workspaces.get_display_size() return {\"x\": int(total_size[0]/display['x']), \"y\": int(total_size[1]/display['y'])} @staticmethod def _workspace_coords_to_screen_coords(x, y): disp_size = workspaces.get_display_size()", "+ ',-1,-1']) @staticmethod def get_windows(): windows = workspaces._cmd('wmctrl', '-l').split(\"\\n\") lines = [re.split(' *',", "= workspaces.get_display_size() workspace_size = workspaces.get_workspace_count() x_coord = -1 * disp_size['x'] * (workspace_size['x'] -", "x_coord, \"y\": y_coord} @staticmethod def move_window(id, desk_x, desk_y): coords = workspaces._workspace_coords_to_screen_coords(desk_x, desk_y) subprocess.call(['wmctrl',", "size = (re.split(' *', workspaces._cmd('wmctrl', '-d').replace(\"\\n\", \"\")))[8].split('x') return {\"x\": int(size[0]), \"y\": int(size[1])} @staticmethod", "= -1 * disp_size['x'] * (workspace_size['x'] - 1 - x) y_coord = -1", "def get_workspace_count(): total_size = (re.split(' *', workspaces._cmd('wmctrl', '-d').replace(\"\\n\", \"\")))[3].split('x') total_size = [int(x) for", "get_windows(): windows = workspaces._cmd('wmctrl', '-l').split(\"\\n\") lines = [re.split(' *', desc, 3) for desc", "+ str(coords['x']) + ',' + str(coords['y']) + ',-1,-1']) @staticmethod def get_windows(): windows =", "import re import subprocess class workspaces(): @staticmethod def _cmd(*args): return subprocess.Popen(args, stdout=subprocess.PIPE).stdout.read().decode(\"utf-8\") @staticmethod", "- 1 - x) y_coord = -1 * disp_size['y'] * (workspace_size['y'] - 1-", "total_size] display = workspaces.get_display_size() return {\"x\": int(total_size[0]/display['x']), \"y\": int(total_size[1]/display['y'])} @staticmethod def _workspace_coords_to_screen_coords(x, y):", "*', workspaces._cmd('wmctrl', '-d').replace(\"\\n\", \"\")))[3].split('x') 
total_size = [int(x) for x in total_size] display =", "import subprocess class workspaces(): @staticmethod def _cmd(*args): return subprocess.Popen(args, stdout=subprocess.PIPE).stdout.read().decode(\"utf-8\") @staticmethod def get_display_size():", "for x in total_size] display = workspaces.get_display_size() return {\"x\": int(total_size[0]/display['x']), \"y\": int(total_size[1]/display['y'])} @staticmethod", "subprocess class workspaces(): @staticmethod def _cmd(*args): return subprocess.Popen(args, stdout=subprocess.PIPE).stdout.read().decode(\"utf-8\") @staticmethod def get_display_size(): size", "re import subprocess class workspaces(): @staticmethod def _cmd(*args): return subprocess.Popen(args, stdout=subprocess.PIPE).stdout.read().decode(\"utf-8\") @staticmethod def", "display = workspaces.get_display_size() return {\"x\": int(total_size[0]/display['x']), \"y\": int(total_size[1]/display['y'])} @staticmethod def _workspace_coords_to_screen_coords(x, y): disp_size", "{\"x\": x_coord, \"y\": y_coord} @staticmethod def move_window(id, desk_x, desk_y): coords = workspaces._workspace_coords_to_screen_coords(desk_x, desk_y)", "'-l').split(\"\\n\") lines = [re.split(' *', desc, 3) for desc in windows] return [dict(zip(['id',", "_cmd(*args): return subprocess.Popen(args, stdout=subprocess.PIPE).stdout.read().decode(\"utf-8\") @staticmethod def get_display_size(): size = (re.split(' *', workspaces._cmd('wmctrl', '-d').replace(\"\\n\",", "@staticmethod def _workspace_coords_to_screen_coords(x, y): disp_size = workspaces.get_display_size() workspace_size = workspaces.get_workspace_count() x_coord = -1", "- x) y_coord = -1 * disp_size['y'] * (workspace_size['y'] - 1- y) return", "\"\")))[8].split('x') return {\"x\": int(size[0]), \"y\": int(size[1])} @staticmethod def get_workspace_count(): total_size = (re.split(' *',", "def get_windows(): windows = workspaces._cmd('wmctrl', '-l').split(\"\\n\") lines = [re.split(' *', desc, 3) for", "[int(x) for x in total_size] 
display = workspaces.get_display_size() return {\"x\": int(total_size[0]/display['x']), \"y\": int(total_size[1]/display['y'])}", "get_display_size(): size = (re.split(' *', workspaces._cmd('wmctrl', '-d').replace(\"\\n\", \"\")))[8].split('x') return {\"x\": int(size[0]), \"y\": int(size[1])}", "total_size = (re.split(' *', workspaces._cmd('wmctrl', '-d').replace(\"\\n\", \"\")))[3].split('x') total_size = [int(x) for x in", "desk_x, desk_y): coords = workspaces._workspace_coords_to_screen_coords(desk_x, desk_y) subprocess.call(['wmctrl', '-i', '-r', id, '-e', '0,' +", "'-i', '-r', id, '-e', '0,' + str(coords['x']) + ',' + str(coords['y']) + ',-1,-1'])", "subprocess.Popen(args, stdout=subprocess.PIPE).stdout.read().decode(\"utf-8\") @staticmethod def get_display_size(): size = (re.split(' *', workspaces._cmd('wmctrl', '-d').replace(\"\\n\", \"\")))[8].split('x') return", "{\"x\": int(total_size[0]/display['x']), \"y\": int(total_size[1]/display['y'])} @staticmethod def _workspace_coords_to_screen_coords(x, y): disp_size = workspaces.get_display_size() workspace_size =", "@staticmethod def _cmd(*args): return subprocess.Popen(args, stdout=subprocess.PIPE).stdout.read().decode(\"utf-8\") @staticmethod def get_display_size(): size = (re.split(' *',", "= workspaces._cmd('wmctrl', '-l').split(\"\\n\") lines = [re.split(' *', desc, 3) for desc in windows]", "(re.split(' *', workspaces._cmd('wmctrl', '-d').replace(\"\\n\", \"\")))[3].split('x') total_size = [int(x) for x in total_size] display", "return {\"x\": x_coord, \"y\": y_coord} @staticmethod def move_window(id, desk_x, desk_y): coords = workspaces._workspace_coords_to_screen_coords(desk_x,", "disp_size['y'] * (workspace_size['y'] - 1- y) return {\"x\": x_coord, \"y\": y_coord} @staticmethod def", "-1 * disp_size['y'] * (workspace_size['y'] - 1- y) return {\"x\": x_coord, \"y\": y_coord}", "= workspaces.get_workspace_count() x_coord = -1 * disp_size['x'] * (workspace_size['x'] - 1 - x)", "'-r', id, '-e', '0,' + 
str(coords['x']) + ',' + str(coords['y']) + ',-1,-1']) @staticmethod", "get_workspace_count(): total_size = (re.split(' *', workspaces._cmd('wmctrl', '-d').replace(\"\\n\", \"\")))[3].split('x') total_size = [int(x) for x", "in total_size] display = workspaces.get_display_size() return {\"x\": int(total_size[0]/display['x']), \"y\": int(total_size[1]/display['y'])} @staticmethod def _workspace_coords_to_screen_coords(x,", "* disp_size['x'] * (workspace_size['x'] - 1 - x) y_coord = -1 * disp_size['y']", "_workspace_coords_to_screen_coords(x, y): disp_size = workspaces.get_display_size() workspace_size = workspaces.get_workspace_count() x_coord = -1 * disp_size['x']", "workspace_size = workspaces.get_workspace_count() x_coord = -1 * disp_size['x'] * (workspace_size['x'] - 1 -", "* disp_size['y'] * (workspace_size['y'] - 1- y) return {\"x\": x_coord, \"y\": y_coord} @staticmethod", "-1 * disp_size['x'] * (workspace_size['x'] - 1 - x) y_coord = -1 *", "def _workspace_coords_to_screen_coords(x, y): disp_size = workspaces.get_display_size() workspace_size = workspaces.get_workspace_count() x_coord = -1 *", "int(size[0]), \"y\": int(size[1])} @staticmethod def get_workspace_count(): total_size = (re.split(' *', workspaces._cmd('wmctrl', '-d').replace(\"\\n\", \"\")))[3].split('x')", "@staticmethod def get_workspace_count(): total_size = (re.split(' *', workspaces._cmd('wmctrl', '-d').replace(\"\\n\", \"\")))[3].split('x') total_size = [int(x)", "disp_size['x'] * (workspace_size['x'] - 1 - x) y_coord = -1 * disp_size['y'] *", "windows = workspaces._cmd('wmctrl', '-l').split(\"\\n\") lines = [re.split(' *', desc, 3) for desc in", "+ str(coords['y']) + ',-1,-1']) @staticmethod def get_windows(): windows = workspaces._cmd('wmctrl', '-l').split(\"\\n\") lines =", "workspaces._cmd('wmctrl', '-l').split(\"\\n\") lines = [re.split(' *', desc, 3) for desc in windows] return", "move_window(id, desk_x, desk_y): coords = workspaces._workspace_coords_to_screen_coords(desk_x, desk_y) 
subprocess.call(['wmctrl', '-i', '-r', id, '-e', '0,'", "*', desc, 3) for desc in windows] return [dict(zip(['id', 'desktop', 'machine', 'title'], line))", "(re.split(' *', workspaces._cmd('wmctrl', '-d').replace(\"\\n\", \"\")))[8].split('x') return {\"x\": int(size[0]), \"y\": int(size[1])} @staticmethod def get_workspace_count():", "= (re.split(' *', workspaces._cmd('wmctrl', '-d').replace(\"\\n\", \"\")))[3].split('x') total_size = [int(x) for x in total_size]", "id, '-e', '0,' + str(coords['x']) + ',' + str(coords['y']) + ',-1,-1']) @staticmethod def", "return {\"x\": int(size[0]), \"y\": int(size[1])} @staticmethod def get_workspace_count(): total_size = (re.split(' *', workspaces._cmd('wmctrl',", "def get_display_size(): size = (re.split(' *', workspaces._cmd('wmctrl', '-d').replace(\"\\n\", \"\")))[8].split('x') return {\"x\": int(size[0]), \"y\":", "y): disp_size = workspaces.get_display_size() workspace_size = workspaces.get_workspace_count() x_coord = -1 * disp_size['x'] *", "def _cmd(*args): return subprocess.Popen(args, stdout=subprocess.PIPE).stdout.read().decode(\"utf-8\") @staticmethod def get_display_size(): size = (re.split(' *', workspaces._cmd('wmctrl',", "= [int(x) for x in total_size] display = workspaces.get_display_size() return {\"x\": int(total_size[0]/display['x']), \"y\":", "= workspaces.get_display_size() return {\"x\": int(total_size[0]/display['x']), \"y\": int(total_size[1]/display['y'])} @staticmethod def _workspace_coords_to_screen_coords(x, y): disp_size =", "1- y) return {\"x\": x_coord, \"y\": y_coord} @staticmethod def move_window(id, desk_x, desk_y): coords", "subprocess.call(['wmctrl', '-i', '-r', id, '-e', '0,' + str(coords['x']) + ',' + str(coords['y']) +", "\"y\": int(total_size[1]/display['y'])} @staticmethod def _workspace_coords_to_screen_coords(x, y): disp_size = workspaces.get_display_size() workspace_size = workspaces.get_workspace_count() x_coord", "+ ',' + str(coords['y']) + ',-1,-1']) @staticmethod def 
get_windows(): windows = workspaces._cmd('wmctrl', '-l').split(\"\\n\")", "= -1 * disp_size['y'] * (workspace_size['y'] - 1- y) return {\"x\": x_coord, \"y\":", "\"\")))[3].split('x') total_size = [int(x) for x in total_size] display = workspaces.get_display_size() return {\"x\":", "(workspace_size['x'] - 1 - x) y_coord = -1 * disp_size['y'] * (workspace_size['y'] -", "lines = [re.split(' *', desc, 3) for desc in windows] return [dict(zip(['id', 'desktop',", "workspaces._cmd('wmctrl', '-d').replace(\"\\n\", \"\")))[8].split('x') return {\"x\": int(size[0]), \"y\": int(size[1])} @staticmethod def get_workspace_count(): total_size =", "x in total_size] display = workspaces.get_display_size() return {\"x\": int(total_size[0]/display['x']), \"y\": int(total_size[1]/display['y'])} @staticmethod def", "disp_size = workspaces.get_display_size() workspace_size = workspaces.get_workspace_count() x_coord = -1 * disp_size['x'] * (workspace_size['x']", "workspaces._cmd('wmctrl', '-d').replace(\"\\n\", \"\")))[3].split('x') total_size = [int(x) for x in total_size] display = workspaces.get_display_size()", "1 - x) y_coord = -1 * disp_size['y'] * (workspace_size['y'] - 1- y)", "@staticmethod def get_display_size(): size = (re.split(' *', workspaces._cmd('wmctrl', '-d').replace(\"\\n\", \"\")))[8].split('x') return {\"x\": int(size[0]),", "def move_window(id, desk_x, desk_y): coords = workspaces._workspace_coords_to_screen_coords(desk_x, desk_y) subprocess.call(['wmctrl', '-i', '-r', id, '-e',", "for desc in windows] return [dict(zip(['id', 'desktop', 'machine', 'title'], line)) for line in", "desc in windows] return [dict(zip(['id', 'desktop', 'machine', 'title'], line)) for line in lines]", "workspaces.get_display_size() workspace_size = workspaces.get_workspace_count() x_coord = -1 * disp_size['x'] * (workspace_size['x'] - 1", "x) y_coord = -1 * disp_size['y'] * (workspace_size['y'] - 1- y) return {\"x\":", "'0,' + str(coords['x']) + ',' + str(coords['y']) + ',-1,-1']) 
@staticmethod def get_windows(): windows", "= [re.split(' *', desc, 3) for desc in windows] return [dict(zip(['id', 'desktop', 'machine',", "y) return {\"x\": x_coord, \"y\": y_coord} @staticmethod def move_window(id, desk_x, desk_y): coords =", "desc, 3) for desc in windows] return [dict(zip(['id', 'desktop', 'machine', 'title'], line)) for", "',' + str(coords['y']) + ',-1,-1']) @staticmethod def get_windows(): windows = workspaces._cmd('wmctrl', '-l').split(\"\\n\") lines", "[re.split(' *', desc, 3) for desc in windows] return [dict(zip(['id', 'desktop', 'machine', 'title'],", "'-d').replace(\"\\n\", \"\")))[8].split('x') return {\"x\": int(size[0]), \"y\": int(size[1])} @staticmethod def get_workspace_count(): total_size = (re.split('", "y_coord} @staticmethod def move_window(id, desk_x, desk_y): coords = workspaces._workspace_coords_to_screen_coords(desk_x, desk_y) subprocess.call(['wmctrl', '-i', '-r',", "desk_y) subprocess.call(['wmctrl', '-i', '-r', id, '-e', '0,' + str(coords['x']) + ',' + str(coords['y'])", "int(size[1])} @staticmethod def get_workspace_count(): total_size = (re.split(' *', workspaces._cmd('wmctrl', '-d').replace(\"\\n\", \"\")))[3].split('x') total_size =", "'-e', '0,' + str(coords['x']) + ',' + str(coords['y']) + ',-1,-1']) @staticmethod def get_windows():", "#!/usr/bin/python import re import subprocess class workspaces(): @staticmethod def _cmd(*args): return subprocess.Popen(args, stdout=subprocess.PIPE).stdout.read().decode(\"utf-8\")", "desk_y): coords = workspaces._workspace_coords_to_screen_coords(desk_x, desk_y) subprocess.call(['wmctrl', '-i', '-r', id, '-e', '0,' + str(coords['x'])", "return subprocess.Popen(args, stdout=subprocess.PIPE).stdout.read().decode(\"utf-8\") @staticmethod def get_display_size(): size = (re.split(' *', workspaces._cmd('wmctrl', '-d').replace(\"\\n\", \"\")))[8].split('x')", "workspaces.get_workspace_count() x_coord = -1 * disp_size['x'] * (workspace_size['x'] - 1 - x) y_coord", 
"stdout=subprocess.PIPE).stdout.read().decode(\"utf-8\") @staticmethod def get_display_size(): size = (re.split(' *', workspaces._cmd('wmctrl', '-d').replace(\"\\n\", \"\")))[8].split('x') return {\"x\":", "int(total_size[0]/display['x']), \"y\": int(total_size[1]/display['y'])} @staticmethod def _workspace_coords_to_screen_coords(x, y): disp_size = workspaces.get_display_size() workspace_size = workspaces.get_workspace_count()", "{\"x\": int(size[0]), \"y\": int(size[1])} @staticmethod def get_workspace_count(): total_size = (re.split(' *', workspaces._cmd('wmctrl', '-d').replace(\"\\n\",", "int(total_size[1]/display['y'])} @staticmethod def _workspace_coords_to_screen_coords(x, y): disp_size = workspaces.get_display_size() workspace_size = workspaces.get_workspace_count() x_coord =", "coords = workspaces._workspace_coords_to_screen_coords(desk_x, desk_y) subprocess.call(['wmctrl', '-i', '-r', id, '-e', '0,' + str(coords['x']) +", "= workspaces._workspace_coords_to_screen_coords(desk_x, desk_y) subprocess.call(['wmctrl', '-i', '-r', id, '-e', '0,' + str(coords['x']) + ','", "* (workspace_size['x'] - 1 - x) y_coord = -1 * disp_size['y'] * (workspace_size['y']", "\"y\": int(size[1])} @staticmethod def get_workspace_count(): total_size = (re.split(' *', workspaces._cmd('wmctrl', '-d').replace(\"\\n\", \"\")))[3].split('x') total_size", "workspaces(): @staticmethod def _cmd(*args): return subprocess.Popen(args, stdout=subprocess.PIPE).stdout.read().decode(\"utf-8\") @staticmethod def get_display_size(): size = (re.split('", "workspaces._workspace_coords_to_screen_coords(desk_x, desk_y) subprocess.call(['wmctrl', '-i', '-r', id, '-e', '0,' + str(coords['x']) + ',' +", "',-1,-1']) @staticmethod def get_windows(): windows = workspaces._cmd('wmctrl', '-l').split(\"\\n\") lines = [re.split(' *', desc,", "@staticmethod def get_windows(): windows = workspaces._cmd('wmctrl', '-l').split(\"\\n\") lines = [re.split(' *', desc, 3)", "3) for desc in windows] return 
[dict(zip(['id', 'desktop', 'machine', 'title'], line)) for line", "'-d').replace(\"\\n\", \"\")))[3].split('x') total_size = [int(x) for x in total_size] display = workspaces.get_display_size() return", "str(coords['x']) + ',' + str(coords['y']) + ',-1,-1']) @staticmethod def get_windows(): windows = workspaces._cmd('wmctrl',", "return {\"x\": int(total_size[0]/display['x']), \"y\": int(total_size[1]/display['y'])} @staticmethod def _workspace_coords_to_screen_coords(x, y): disp_size = workspaces.get_display_size() workspace_size", "- 1- y) return {\"x\": x_coord, \"y\": y_coord} @staticmethod def move_window(id, desk_x, desk_y):", "total_size = [int(x) for x in total_size] display = workspaces.get_display_size() return {\"x\": int(total_size[0]/display['x']),", "= (re.split(' *', workspaces._cmd('wmctrl', '-d').replace(\"\\n\", \"\")))[8].split('x') return {\"x\": int(size[0]), \"y\": int(size[1])} @staticmethod def", "*', workspaces._cmd('wmctrl', '-d').replace(\"\\n\", \"\")))[8].split('x') return {\"x\": int(size[0]), \"y\": int(size[1])} @staticmethod def get_workspace_count(): total_size", "(workspace_size['y'] - 1- y) return {\"x\": x_coord, \"y\": y_coord} @staticmethod def move_window(id, desk_x,", "\"y\": y_coord} @staticmethod def move_window(id, desk_x, desk_y): coords = workspaces._workspace_coords_to_screen_coords(desk_x, desk_y) subprocess.call(['wmctrl', '-i',", "str(coords['y']) + ',-1,-1']) @staticmethod def get_windows(): windows = workspaces._cmd('wmctrl', '-l').split(\"\\n\") lines = [re.split('", "@staticmethod def move_window(id, desk_x, desk_y): coords = workspaces._workspace_coords_to_screen_coords(desk_x, desk_y) subprocess.call(['wmctrl', '-i', '-r', id,", "y_coord = -1 * disp_size['y'] * (workspace_size['y'] - 1- y) return {\"x\": x_coord,", "x_coord = -1 * disp_size['x'] * (workspace_size['x'] - 1 - x) y_coord =", "* (workspace_size['y'] - 1- y) return {\"x\": x_coord, \"y\": y_coord} @staticmethod def move_window(id," ]
[ "parser.add_argument('--madmin_password', required=False, default='', type=str) args, unknown = parser.parse_known_args() return {'madmin_url': args.madmin_url.rstrip('/'), 'madmin_user': args.madmin_user.strip(),", "dict: parser = configargparse.ArgParser(default_config_files=['config.ini']) parser.add_argument('--madmin_url', required=True, type=str) parser.add_argument('--madmin_user', required=False, default='', type=str) parser.add_argument('--madmin_password', required=False,", "parse_args() -> dict: parser = configargparse.ArgParser(default_config_files=['config.ini']) parser.add_argument('--madmin_url', required=True, type=str) parser.add_argument('--madmin_user', required=False, default='', type=str)", "parser.add_argument('--madmin_user', required=False, default='', type=str) parser.add_argument('--madmin_password', required=False, default='', type=str) args, unknown = parser.parse_known_args() return", "= configargparse.ArgParser(default_config_files=['config.ini']) parser.add_argument('--madmin_url', required=True, type=str) parser.add_argument('--madmin_user', required=False, default='', type=str) parser.add_argument('--madmin_password', required=False, default='', type=str)", "type=str) parser.add_argument('--madmin_password', required=False, default='', type=str) args, unknown = parser.parse_known_args() return {'madmin_url': args.madmin_url.rstrip('/'), 'madmin_user':", "parser = configargparse.ArgParser(default_config_files=['config.ini']) parser.add_argument('--madmin_url', required=True, type=str) parser.add_argument('--madmin_user', required=False, default='', type=str) parser.add_argument('--madmin_password', required=False, default='',", "def parse_args() -> dict: parser = configargparse.ArgParser(default_config_files=['config.ini']) parser.add_argument('--madmin_url', required=True, type=str) parser.add_argument('--madmin_user', required=False, default='',", "default='', type=str) args, unknown = parser.parse_known_args() return 
{'madmin_url': args.madmin_url.rstrip('/'), 'madmin_user': args.madmin_user.strip(), 'madmin_password': args.madmin_password.strip()}", "configargparse def parse_args() -> dict: parser = configargparse.ArgParser(default_config_files=['config.ini']) parser.add_argument('--madmin_url', required=True, type=str) parser.add_argument('--madmin_user', required=False,", "-> dict: parser = configargparse.ArgParser(default_config_files=['config.ini']) parser.add_argument('--madmin_url', required=True, type=str) parser.add_argument('--madmin_user', required=False, default='', type=str) parser.add_argument('--madmin_password',", "required=False, default='', type=str) args, unknown = parser.parse_known_args() return {'madmin_url': args.madmin_url.rstrip('/'), 'madmin_user': args.madmin_user.strip(), 'madmin_password':", "default='', type=str) parser.add_argument('--madmin_password', required=False, default='', type=str) args, unknown = parser.parse_known_args() return {'madmin_url': args.madmin_url.rstrip('/'),", "type=str) parser.add_argument('--madmin_user', required=False, default='', type=str) parser.add_argument('--madmin_password', required=False, default='', type=str) args, unknown = parser.parse_known_args()", "required=False, default='', type=str) parser.add_argument('--madmin_password', required=False, default='', type=str) args, unknown = parser.parse_known_args() return {'madmin_url':", "parser.add_argument('--madmin_url', required=True, type=str) parser.add_argument('--madmin_user', required=False, default='', type=str) parser.add_argument('--madmin_password', required=False, default='', type=str) args, unknown", "import configargparse def parse_args() -> dict: parser = configargparse.ArgParser(default_config_files=['config.ini']) parser.add_argument('--madmin_url', required=True, type=str) parser.add_argument('--madmin_user',", "configargparse.ArgParser(default_config_files=['config.ini']) parser.add_argument('--madmin_url', required=True, type=str) 
parser.add_argument('--madmin_user', required=False, default='', type=str) parser.add_argument('--madmin_password', required=False, default='', type=str) args,", "required=True, type=str) parser.add_argument('--madmin_user', required=False, default='', type=str) parser.add_argument('--madmin_password', required=False, default='', type=str) args, unknown =" ]
[ "B}) assert dag.tasks == {C} dag.remove_tasks(C) assert dag.tasks == set() # ---------------------------------------------------------------------------- #", "2019 Dynatrace LLC ## ## Licensed under the Apache License, Version 2.0 (the", "get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert dag.get_upstream() is not None assert dag.get_upstream()[B]", "\"\".join([\"DAG({Task(\", p.resolve().as_posix(), \")})\"]) # ---------------------------------------------------------------------------- # DAG.tasks # ---------------------------------------------------------------------------- def test__DAG_add_task(): A, B", "def test__validate_dependency(): make_tea = Task(\"make_tea.py\", \"test-env\") drink_tea = Task(\"drink_tea.py\", \"test-env\") with pytest.raises(TypeError): DAG.validate_dependency([1,", "(Task(\"A.py\", env=\"test-env\"), Task(\"B.py\", env=\"test-env\")) # ---------------------------------------------------------------------------- # DAG magic methods # ---------------------------------------------------------------------------- def", "Task(\"B.py\", env=\"test-env\")) # ---------------------------------------------------------------------------- # DAG magic methods # ---------------------------------------------------------------------------- def test__validate_dependency(): make_tea", "assert dag._edges[A] == set([B]) def test__DAG_add_dependency_detect_cycle(): A, B = get_two_tasks() dag = DAG()", "== set([C]) def test__DAG_add_dependency_detect_cycle2(): A, B = get_two_tasks() C = Task(\"C.py\", env=\"test-env\") dag", "def test__DAG_remove_task(): A, B = get_two_tasks() dag = DAG() dag.add_tasks({A, B}) dag.remove_task(A) assert", "= DAG() dag.add_dependencies({B: A}) assert dag._edges[A] == set([B]) dag = DAG() dag.add_dependencies({C: {A,", "methods # ---------------------------------------------------------------------------- def test__DAG_get_downstream(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B,", "assert 
len(dag.tasks) == 2 dag = DAG(upstream_dependencies={drink_tea: make_tea}) assert len(dag.tasks) == 2 dag", "dag.add_dependency(B, A) assert dag._edges[A] == set([B]) def test__DAG_add_dependency_detect_cycle(): A, B = get_two_tasks() dag", "dag.add_dependency(B, depends_on=A) assert dag.get_sinks() is not None assert dag.get_sinks() == {B} def test__DAG_is_cyclic():", "assert len(dag.tasks) == 1 dag = DAG(tasks={drink_tea, make_tea}) assert len(dag.tasks) == 2 dag", "= DAG() dag.add_dependencies({C: {A, B}}) assert dag._edges[A] == set([C]) assert dag._edges[B] == set([C])", "COMPOSE_TRICKY, ) # ---------------------------------------------------------------------------- # Helper Functions # ---------------------------------------------------------------------------- def get_two_tasks(): return (Task(\"A.py\",", "test__DAG_add_dependency_detect_cycle(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, A) with pytest.raises(CyclicGraphError): dag.add_dependency(A,", "to the DAG\" def test__DAG_add_tasks(): A, B = get_two_tasks() C = Task(\"C.py\") dag", "# ---------------------------------------------------------------------------- # DAG magic methods # ---------------------------------------------------------------------------- def test__validate_dependency(): make_tea = Task(\"make_tea.py\",", "dag._edges[A] == set([B]) dag = DAG() dag.add_dependencies({C: {A, B}}) assert dag._edges[A] == set([C])", "B = get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert dag.get_downstream() is not None", "depends_on=A) assert dag.get_sinks() is not None assert dag.get_sinks() == {B} def test__DAG_is_cyclic(): A,", "DAG() dag.add_dependency(B, A) with pytest.raises(CyclicGraphError): dag.add_dependency(A, B) def test__DAG_add_dependencies(): A, B = get_two_tasks()", "DAG.validate_dependency({make_tea: drink_tea}) DAG.validate_dependency({make_tea: {drink_tea, drink_tea}}) def test__DAG_init(): DAG() # init with dependencies make_tea", "downstream\" def 
test__DAG_get_upstream(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert", "test__DAG_remove_task(): A, B = get_two_tasks() dag = DAG() dag.add_tasks({A, B}) dag.remove_task(A) assert dag.tasks", "not added to the DAG\" def test__DAG_add_tasks(): A, B = get_two_tasks() C =", "dag.add_dependencies({C: {A, B}}) assert dag._edges[A] == set([C]) assert dag._edges[B] == set([C]) def test__DAG_add_dependency_detect_cycle2():", "\"\"\"Unit tests for the DAG class.\"\"\" from collections import defaultdict import os import", "dag.get_upstream() == {B: {A}}, \"Task A is not upstream\" def test__DAG_get_sources(): A, B", "{A, B, C} def test__DAG_remove_task(): A, B = get_two_tasks() dag = DAG() dag.add_tasks({A,", "specific language governing permissions and ## limitations under the License. ## --------------------------------------------------------------------------- \"\"\"Unit", "DAG.validate_dependency({Task: {1, 2, 3}}) DAG.validate_dependency({make_tea: drink_tea}) DAG.validate_dependency({make_tea: {drink_tea, drink_tea}}) def test__DAG_init(): DAG() #", "2 dag = DAG(upstream_dependencies={drink_tea: make_tea}) assert len(dag.tasks) == 2 dag = DAG(downstream_dependencies={make_tea: drink_tea})", "dag.add_tasks({A, B}) assert dag.tasks == {A, B}, \"Test Tasks were not added to", "== {B} assert dag.get_downstream() == {A: {B}}, \"Task B is not downstream\" def", "A, B = get_two_tasks() C = Task(\"C.py\", env=\"test-env\") dag = DAG() with pytest.raises(CyclicGraphError):", "to the DAG\" dag.add_tasks(C) assert dag.tasks == {A, B, C} def test__DAG_remove_task(): A,", "DAG\" def test__DAG_add_tasks(): A, B = get_two_tasks() C = Task(\"C.py\") dag = DAG()", "add dependencies # ---------------------------------------------------------------------------- def test__DAG_add_dependency(): A, B = get_two_tasks() dag = DAG()", "collections import defaultdict import os import pathlib import pytest from alyeska.compose import Task,", "make_tea}) 
assert len(dag.tasks) == 2 dag = DAG(upstream_dependencies={drink_tea: make_tea}) assert len(dag.tasks) == 2", "= DAG() dag.add_dependency(B, depends_on=A) assert dag.get_upstream() is not None assert dag.get_upstream()[B] == {A}", "A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert dag.get_sources() is not", "assert repr(dag) == \"\".join([\"DAG({Task(\", p.resolve().as_posix(), \")})\"]) # ---------------------------------------------------------------------------- # DAG.tasks # ---------------------------------------------------------------------------- def", "test__DAG_remove_tasks(): A, B = get_two_tasks() C = Task(\"C.py\") dag = DAG() dag.add_tasks({A, B,", "make_tea}) assert len(dag.tasks) == 2 dag = DAG(downstream_dependencies={make_tea: drink_tea}) assert len(dag.tasks) == 2", "2, 3]) with pytest.raises(ValueError): DAG.validate_dependency(defaultdict(set, {make_tea: [drink_tea]})) with pytest.raises(ValueError): DAG.validate_dependency({Task: {1, 2, 3}})", "= DAG() dag.add_task(A) assert dag.tasks == {A}, \"Test Task was not added to", "{drink_tea, drink_tea}}) def test__DAG_init(): DAG() # init with dependencies make_tea = Task(\"make_tea.py\", \"test-env\")", "you may not use this file except in compliance with the License. 
##", "= DAG() dag.add_dependency(B, depends_on=A) assert dag.get_sinks() is not None assert dag.get_sinks() == {B}", "test__DAG_get_sinks(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert dag.get_sinks() is", "== 2 dag = DAG(downstream_dependencies={make_tea: drink_tea}) assert len(dag.tasks) == 2 def test__DAG_repr(): p", "DAG magic methods # ---------------------------------------------------------------------------- def test__validate_dependency(): make_tea = Task(\"make_tea.py\", \"test-env\") drink_tea =", "A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert not dag.is_cyclic(), \"acyclic", "dag.add_dependency(B, depends_on=A) assert dag.get_sources() is not None assert dag.get_sources() == {A} def test__DAG_get_sinks():", "pytest.raises(CyclicGraphError): dag.add_dependencies({A: C, B: A, C: B}) # ---------------------------------------------------------------------------- # methods # ----------------------------------------------------------------------------", "\")})\"]) # ---------------------------------------------------------------------------- # DAG.tasks # ---------------------------------------------------------------------------- def test__DAG_add_task(): A, B = get_two_tasks()", "assert dag.get_sources() is not None assert dag.get_sources() == {A} def test__DAG_get_sinks(): A, B", "len(dag.tasks) == 1 dag = DAG(tasks={drink_tea, make_tea}) assert len(dag.tasks) == 2 dag =", "DAG.validate_dependency(defaultdict(set, {make_tea: [drink_tea]})) with pytest.raises(ValueError): DAG.validate_dependency({Task: {1, 2, 3}}) DAG.validate_dependency({make_tea: drink_tea}) DAG.validate_dependency({make_tea: {drink_tea,", "dag.tasks == {C} dag.remove_tasks(C) assert dag.tasks == set() # ---------------------------------------------------------------------------- # add dependencies", "to in writing, software ## distributed under the License is distributed on an", ") # 
---------------------------------------------------------------------------- # Helper Functions # ---------------------------------------------------------------------------- def get_two_tasks(): return (Task(\"A.py\", env=\"test-env\"),", "with pytest.raises(CyclicGraphError): dag.add_dependency(A, depends_on=B) def test__DAG_from_yaml(): DAG.from_yaml(COMPOSE_SMALL) with pytest.raises(CyclicGraphError): DAG.from_yaml(COMPOSE_CYCLE) dag = DAG.from_yaml(COMPOSE_TRICKY)", "def test__DAG_remove_tasks(): A, B = get_two_tasks() C = Task(\"C.py\") dag = DAG() dag.add_tasks({A,", "= Task(\"drink_tea.py\", \"test-env\") dag = DAG(tasks=make_tea) assert len(dag.tasks) == 1 dag = DAG(tasks={drink_tea,", "import os import pathlib import pytest from alyeska.compose import Task, DAG from alyeska.compose.exceptions", "assert dag.tasks == {A, B, C} def test__DAG_remove_task(): A, B = get_two_tasks() dag", "dag.get_upstream() is not None assert dag.get_upstream()[B] == {A} assert dag.get_upstream() == {B: {A}},", "Copyright 2019 Dynatrace LLC ## ## Licensed under the Apache License, Version 2.0", "writing, software ## distributed under the License is distributed on an \"AS IS\"", "p = pathlib.Path(\"make_tea.py\") make_tea = Task(p, \"test-env\") dag = DAG() dag.add_task(make_tea) assert repr(dag)", "dag.get_downstream()[A] == {B} assert dag.get_downstream() == {A: {B}}, \"Task B is not downstream\"", "A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, A) with pytest.raises(CyclicGraphError): dag.add_dependency(A, B)", "assert dag.get_downstream() is not None assert dag.get_downstream()[A] == {B} assert dag.get_downstream() == {A:", "License, Version 2.0 (the \"License\"); ## you may not use this file except", "alyeska.compose.exceptions import CyclicGraphError from test_compose_globals import ( COMPOSE_SMALL, COMPOSE_BIG, COMPOSE_CYCLE, COMPOSE_TRICKY, ) #", "dag.get_sources() == {A} def test__DAG_get_sinks(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B,", "is 
not None assert dag.get_sinks() == {B} def test__DAG_is_cyclic(): A, B = get_two_tasks()", "test__DAG_add_dependencies(): A, B = get_two_tasks() C = Task(\"C.py\", env=\"test-env\") dag = DAG() dag.add_dependencies({B:", "\"test-env\") drink_tea = Task(\"drink_tea.py\", \"test-env\") with pytest.raises(TypeError): DAG.validate_dependency([1, 2, 3]) with pytest.raises(ValueError): DAG.validate_dependency(defaultdict(set,", "depends_on=A) assert dag.get_sources() is not None assert dag.get_sources() == {A} def test__DAG_get_sinks(): A,", "dag.add_dependency(B, A) with pytest.raises(CyclicGraphError): dag.add_dependency(A, B) def test__DAG_add_dependencies(): A, B = get_two_tasks() C", "assert dag.get_downstream()[A] == {B} assert dag.get_downstream() == {A: {B}}, \"Task B is not", "== \"\".join([\"DAG({Task(\", p.resolve().as_posix(), \")})\"]) # ---------------------------------------------------------------------------- # DAG.tasks # ---------------------------------------------------------------------------- def test__DAG_add_task(): A,", "== 2 def test__DAG_repr(): p = pathlib.Path(\"make_tea.py\") make_tea = Task(p, \"test-env\") dag =", "drink_tea = Task(\"drink_tea.py\", \"test-env\") with pytest.raises(TypeError): DAG.validate_dependency([1, 2, 3]) with pytest.raises(ValueError): DAG.validate_dependency(defaultdict(set, {make_tea:", "= get_two_tasks() dag = DAG() dag.add_task(A) assert dag.tasks == {A}, \"Test Task was", "= DAG(upstream_dependencies={drink_tea: make_tea}) assert len(dag.tasks) == 2 dag = DAG(downstream_dependencies={make_tea: drink_tea}) assert len(dag.tasks)", "test__DAG_add_tasks(): A, B = get_two_tasks() C = Task(\"C.py\") dag = DAG() dag.add_tasks({A, B})", "get_two_tasks() C = Task(\"C.py\") dag = DAG() dag.add_tasks({A, B, C}) dag.remove_tasks({A, B}) assert", "{A}}, \"Task A is not upstream\" def test__DAG_get_sources(): A, B = get_two_tasks() dag", "dag = DAG(tasks={drink_tea, make_tea}) assert len(dag.tasks) == 2 dag = 
DAG(upstream_dependencies={drink_tea: make_tea}) assert", "DAG() dag.add_dependencies({B: A}) assert dag._edges[A] == set([B]) dag = DAG() dag.add_dependencies({C: {A, B}})", "distributed under the License is distributed on an \"AS IS\" BASIS, ## WITHOUT", "COMPOSE_BIG, COMPOSE_CYCLE, COMPOSE_TRICKY, ) # ---------------------------------------------------------------------------- # Helper Functions # ---------------------------------------------------------------------------- def get_two_tasks():", "DAG() dag.add_tasks({A, B}) dag.remove_task(A) assert dag.tasks == {B} def test__DAG_remove_tasks(): A, B =", "B = get_two_tasks() C = Task(\"C.py\") dag = DAG() dag.add_tasks({A, B, C}) dag.remove_tasks({A,", "3]) with pytest.raises(ValueError): DAG.validate_dependency(defaultdict(set, {make_tea: [drink_tea]})) with pytest.raises(ValueError): DAG.validate_dependency({Task: {1, 2, 3}}) DAG.validate_dependency({make_tea:", "\"test-env\") dag = DAG(tasks=make_tea) assert len(dag.tasks) == 1 dag = DAG(tasks={drink_tea, make_tea}) assert", "# -*- coding: utf-8 -*- ## --------------------------------------------------------------------------- ## Copyright 2019 Dynatrace LLC ##", "the DAG\" dag.add_tasks(C) assert dag.tasks == {A, B, C} def test__DAG_remove_task(): A, B", "# ---------------------------------------------------------------------------- def test__DAG_add_dependency(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, A)", "# methods # ---------------------------------------------------------------------------- def test__DAG_get_downstream(): A, B = get_two_tasks() dag = DAG()", "CyclicGraphError from test_compose_globals import ( COMPOSE_SMALL, COMPOSE_BIG, COMPOSE_CYCLE, COMPOSE_TRICKY, ) # ---------------------------------------------------------------------------- #", "test__DAG_add_task(): A, B = get_two_tasks() dag = DAG() dag.add_task(A) assert dag.tasks == {A},", "dag = DAG() dag.add_task(make_tea) assert repr(dag) == \"\".join([\"DAG({Task(\", 
p.resolve().as_posix(), \")})\"]) # ---------------------------------------------------------------------------- #", "assert dag.get_upstream()[B] == {A} assert dag.get_upstream() == {B: {A}}, \"Task A is not", "DAG() dag.add_tasks({A, B}) assert dag.tasks == {A, B}, \"Test Tasks were not added", "\"test-env\") dag = DAG() dag.add_task(make_tea) assert repr(dag) == \"\".join([\"DAG({Task(\", p.resolve().as_posix(), \")})\"]) # ----------------------------------------------------------------------------", "{A} def test__DAG_get_sinks(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert", "the DAG\" def test__DAG_add_tasks(): A, B = get_two_tasks() C = Task(\"C.py\") dag =", "test__DAG_get_upstream(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert dag.get_upstream() is", "= Task(\"make_tea.py\", \"test-env\") drink_tea = Task(\"drink_tea.py\", \"test-env\") dag = DAG(tasks=make_tea) assert len(dag.tasks) ==", "None assert dag.get_sinks() == {B} def test__DAG_is_cyclic(): A, B = get_two_tasks() dag =", "test__DAG_get_sources(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert dag.get_sources() is", "\"AS IS\" BASIS, ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "DAG() dag.add_dependency(B, depends_on=A) assert dag.get_sinks() is not None assert dag.get_sinks() == {B} def", "= Task(\"C.py\", env=\"test-env\") dag = DAG() dag.add_dependencies({B: A}) assert dag._edges[A] == set([B]) dag", "dag.tasks == {B} def test__DAG_remove_tasks(): A, B = get_two_tasks() C = Task(\"C.py\") dag", "os import pathlib import pytest from alyeska.compose import Task, DAG from alyeska.compose.exceptions import", "def test__DAG_add_tasks(): A, B = get_two_tasks() C = Task(\"C.py\") dag = DAG() dag.add_tasks({A,", "not None assert dag.get_sources() == {A} def test__DAG_get_sinks(): A, B = get_two_tasks() dag", "{B} def test__DAG_remove_tasks(): A, B = get_two_tasks() C = Task(\"C.py\") 
dag = DAG()", "def test__DAG_add_task(): A, B = get_two_tasks() dag = DAG() dag.add_task(A) assert dag.tasks ==", "# DAG.tasks # ---------------------------------------------------------------------------- def test__DAG_add_task(): A, B = get_two_tasks() dag = DAG()", "pathlib.Path(\"make_tea.py\") make_tea = Task(p, \"test-env\") dag = DAG() dag.add_task(make_tea) assert repr(dag) == \"\".join([\"DAG({Task(\",", "---------------------------------------------------------------------------- def get_two_tasks(): return (Task(\"A.py\", env=\"test-env\"), Task(\"B.py\", env=\"test-env\")) # ---------------------------------------------------------------------------- # DAG magic", "Task(\"drink_tea.py\", \"test-env\") dag = DAG(tasks=make_tea) assert len(dag.tasks) == 1 dag = DAG(tasks={drink_tea, make_tea})", "dag.add_dependency(A, B) def test__DAG_add_dependencies(): A, B = get_two_tasks() C = Task(\"C.py\", env=\"test-env\") dag", "# ---------------------------------------------------------------------------- # DAG.tasks # ---------------------------------------------------------------------------- def test__DAG_add_task(): A, B = get_two_tasks() dag", "DAG\" dag.add_tasks(C) assert dag.tasks == {A, B, C} def test__DAG_remove_task(): A, B =", "C}) dag.remove_tasks({A, B}) assert dag.tasks == {C} dag.remove_tasks(C) assert dag.tasks == set() #", "---------------------------------------------------------------------------- def test__DAG_add_dependency(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, A) assert", "License. 
## --------------------------------------------------------------------------- \"\"\"Unit tests for the DAG class.\"\"\" from collections import defaultdict", "Version 2.0 (the \"License\"); ## you may not use this file except in", "B = get_two_tasks() dag = DAG() dag.add_dependency(B, A) assert dag._edges[A] == set([B]) def", "dag.add_task(A) assert dag.tasks == {A}, \"Test Task was not added to the DAG\"", "depends_on=A) assert not dag.is_cyclic(), \"acyclic graph idenfied as cyclic\" with pytest.raises(CyclicGraphError): dag.add_dependency(A, depends_on=B)", "\"test-env\") drink_tea = Task(\"drink_tea.py\", \"test-env\") dag = DAG(tasks=make_tea) assert len(dag.tasks) == 1 dag", "assert dag.get_sinks() == {B} def test__DAG_is_cyclic(): A, B = get_two_tasks() dag = DAG()", "B}, \"Test Tasks were not added to the DAG\" dag.add_tasks(C) assert dag.tasks ==", "def test__DAG_get_sources(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert dag.get_sources()", "== set() # ---------------------------------------------------------------------------- # add dependencies # ---------------------------------------------------------------------------- def test__DAG_add_dependency(): A, B", "get_two_tasks() dag = DAG() dag.add_task(A) assert dag.tasks == {A}, \"Test Task was not", "dag = DAG() dag.add_tasks({A, B, C}) dag.remove_tasks({A, B}) assert dag.tasks == {C} dag.remove_tasks(C)", "= DAG() dag.add_tasks({A, B, C}) dag.remove_tasks({A, B}) assert dag.tasks == {C} dag.remove_tasks(C) assert", "test__DAG_init(): DAG() # init with dependencies make_tea = Task(\"make_tea.py\", \"test-env\") drink_tea = Task(\"drink_tea.py\",", "assert dag._edges[B] == set([C]) def test__DAG_add_dependency_detect_cycle2(): A, B = get_two_tasks() C = Task(\"C.py\",", "C: B}) # ---------------------------------------------------------------------------- # methods # ---------------------------------------------------------------------------- def 
test__DAG_get_downstream(): A, B =", "DAG() dag.add_dependency(B, A) assert dag._edges[A] == set([B]) def test__DAG_add_dependency_detect_cycle(): A, B = get_two_tasks()", "def get_two_tasks(): return (Task(\"A.py\", env=\"test-env\"), Task(\"B.py\", env=\"test-env\")) # ---------------------------------------------------------------------------- # DAG magic methods", "assert len(dag.tasks) == 2 def test__DAG_repr(): p = pathlib.Path(\"make_tea.py\") make_tea = Task(p, \"test-env\")", "A) assert dag._edges[A] == set([B]) def test__DAG_add_dependency_detect_cycle(): A, B = get_two_tasks() dag =", "def test__DAG_add_dependency_detect_cycle(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, A) with pytest.raises(CyclicGraphError):", "CONDITIONS OF ANY KIND, either express or implied. ## See the License for", "# Helper Functions # ---------------------------------------------------------------------------- def get_two_tasks(): return (Task(\"A.py\", env=\"test-env\"), Task(\"B.py\", env=\"test-env\")) #", "DAG() # init with dependencies make_tea = Task(\"make_tea.py\", \"test-env\") drink_tea = Task(\"drink_tea.py\", \"test-env\")", "C, B: A, C: B}) # ---------------------------------------------------------------------------- # methods # ---------------------------------------------------------------------------- def test__DAG_get_downstream():", "assert dag.get_sources() == {A} def test__DAG_get_sinks(): A, B = get_two_tasks() dag = DAG()", "= pathlib.Path(\"make_tea.py\") make_tea = Task(p, \"test-env\") dag = DAG() dag.add_task(make_tea) assert repr(dag) ==", "agreed to in writing, software ## distributed under the License is distributed on", "DAG.tasks # ---------------------------------------------------------------------------- def test__DAG_add_task(): A, B = get_two_tasks() dag = DAG() dag.add_task(A)", "permissions and ## limitations under the License. 
## --------------------------------------------------------------------------- \"\"\"Unit tests for the", "upstream\" def test__DAG_get_sources(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert", "a copy of the License at ## ## http://www.apache.org/licenses/LICENSE-2.0 ## ## Unless required", "dag.add_dependencies({A: C, B: A, C: B}) # ---------------------------------------------------------------------------- # methods # ---------------------------------------------------------------------------- def", "or implied. ## See the License for the specific language governing permissions and", "= get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert dag.get_upstream() is not None assert", "= Task(\"make_tea.py\", \"test-env\") drink_tea = Task(\"drink_tea.py\", \"test-env\") with pytest.raises(TypeError): DAG.validate_dependency([1, 2, 3]) with", "= get_two_tasks() C = Task(\"C.py\", env=\"test-env\") dag = DAG() with pytest.raises(CyclicGraphError): dag.add_dependencies({A: C,", "# ---------------------------------------------------------------------------- def test__DAG_add_task(): A, B = get_two_tasks() dag = DAG() dag.add_task(A) assert", "IS\" BASIS, ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "Task(\"drink_tea.py\", \"test-env\") with pytest.raises(TypeError): DAG.validate_dependency([1, 2, 3]) with pytest.raises(ValueError): DAG.validate_dependency(defaultdict(set, {make_tea: [drink_tea]})) with", "{make_tea: [drink_tea]})) with pytest.raises(ValueError): DAG.validate_dependency({Task: {1, 2, 3}}) DAG.validate_dependency({make_tea: drink_tea}) DAG.validate_dependency({make_tea: {drink_tea, drink_tea}})", "language governing permissions and ## limitations under the License. 
## --------------------------------------------------------------------------- \"\"\"Unit tests", "with pytest.raises(TypeError): DAG.validate_dependency([1, 2, 3]) with pytest.raises(ValueError): DAG.validate_dependency(defaultdict(set, {make_tea: [drink_tea]})) with pytest.raises(ValueError): DAG.validate_dependency({Task:", "software ## distributed under the License is distributed on an \"AS IS\" BASIS,", "with pytest.raises(CyclicGraphError): dag.add_dependencies({A: C, B: A, C: B}) # ---------------------------------------------------------------------------- # methods #", "= Task(\"C.py\", env=\"test-env\") dag = DAG() with pytest.raises(CyclicGraphError): dag.add_dependencies({A: C, B: A, C:", "{A} assert dag.get_upstream() == {B: {A}}, \"Task A is not upstream\" def test__DAG_get_sources():", "get_two_tasks(): return (Task(\"A.py\", env=\"test-env\"), Task(\"B.py\", env=\"test-env\")) # ---------------------------------------------------------------------------- # DAG magic methods #", "## Licensed under the Apache License, Version 2.0 (the \"License\"); ## you may", "env=\"test-env\") dag = DAG() with pytest.raises(CyclicGraphError): dag.add_dependencies({A: C, B: A, C: B}) #", "[drink_tea]})) with pytest.raises(ValueError): DAG.validate_dependency({Task: {1, 2, 3}}) DAG.validate_dependency({make_tea: drink_tea}) DAG.validate_dependency({make_tea: {drink_tea, drink_tea}}) def", "len(dag.tasks) == 2 dag = DAG(upstream_dependencies={drink_tea: make_tea}) assert len(dag.tasks) == 2 dag =", "{A: {B}}, \"Task B is not downstream\" def test__DAG_get_upstream(): A, B = get_two_tasks()", "dag._edges[A] == set([C]) assert dag._edges[B] == set([C]) def test__DAG_add_dependency_detect_cycle2(): A, B = get_two_tasks()", "assert not dag.is_cyclic(), \"acyclic graph idenfied as cyclic\" with pytest.raises(CyclicGraphError): dag.add_dependency(A, depends_on=B) def", "dag = DAG() dag.add_tasks({A, B}) dag.remove_task(A) assert dag.tasks == {B} def 
test__DAG_remove_tasks(): A,", "## Unless required by applicable law or agreed to in writing, software ##", "B) def test__DAG_add_dependencies(): A, B = get_two_tasks() C = Task(\"C.py\", env=\"test-env\") dag =", "(the \"License\"); ## you may not use this file except in compliance with", "idenfied as cyclic\" with pytest.raises(CyclicGraphError): dag.add_dependency(A, depends_on=B) def test__DAG_from_yaml(): DAG.from_yaml(COMPOSE_SMALL) with pytest.raises(CyclicGraphError): DAG.from_yaml(COMPOSE_CYCLE)", "test__DAG_get_downstream(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert dag.get_downstream() is", "from test_compose_globals import ( COMPOSE_SMALL, COMPOSE_BIG, COMPOSE_CYCLE, COMPOSE_TRICKY, ) # ---------------------------------------------------------------------------- # Helper", "DAG() dag.add_dependency(B, depends_on=A) assert dag.get_downstream() is not None assert dag.get_downstream()[A] == {B} assert", "not None assert dag.get_sinks() == {B} def test__DAG_is_cyclic(): A, B = get_two_tasks() dag", "express or implied. 
## See the License for the specific language governing permissions", "init with dependencies make_tea = Task(\"make_tea.py\", \"test-env\") drink_tea = Task(\"drink_tea.py\", \"test-env\") dag =", "applicable law or agreed to in writing, software ## distributed under the License", "B, C}) dag.remove_tasks({A, B}) assert dag.tasks == {C} dag.remove_tasks(C) assert dag.tasks == set()", "= DAG() dag.add_dependency(B, depends_on=A) assert dag.get_downstream() is not None assert dag.get_downstream()[A] == {B}", "## http://www.apache.org/licenses/LICENSE-2.0 ## ## Unless required by applicable law or agreed to in", "C = Task(\"C.py\", env=\"test-env\") dag = DAG() dag.add_dependencies({B: A}) assert dag._edges[A] == set([B])", "get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert dag.get_sinks() is not None assert dag.get_sinks()", "COMPOSE_SMALL, COMPOSE_BIG, COMPOSE_CYCLE, COMPOSE_TRICKY, ) # ---------------------------------------------------------------------------- # Helper Functions # ---------------------------------------------------------------------------- def", "with pytest.raises(ValueError): DAG.validate_dependency({Task: {1, 2, 3}}) DAG.validate_dependency({make_tea: drink_tea}) DAG.validate_dependency({make_tea: {drink_tea, drink_tea}}) def test__DAG_init():", "A, B = get_two_tasks() C = Task(\"C.py\") dag = DAG() dag.add_tasks({A, B}) assert", "assert dag.tasks == {A}, \"Test Task was not added to the DAG\" def", "= DAG() dag.add_dependency(B, depends_on=A) assert dag.get_sources() is not None assert dag.get_sources() == {A}", "B is not downstream\" def test__DAG_get_upstream(): A, B = get_two_tasks() dag = DAG()", "Unless required by applicable law or agreed to in writing, software ## distributed", "dag.add_dependency(B, depends_on=A) assert dag.get_downstream() is not None assert dag.get_downstream()[A] == {B} assert dag.get_downstream()", "Task(p, \"test-env\") dag = DAG() dag.add_task(make_tea) assert repr(dag) == 
\"\".join([\"DAG({Task(\", p.resolve().as_posix(), \")})\"]) #", "is not None assert dag.get_downstream()[A] == {B} assert dag.get_downstream() == {A: {B}}, \"Task", "B = get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert not dag.is_cyclic(), \"acyclic graph", "{A, B}}) assert dag._edges[A] == set([C]) assert dag._edges[B] == set([C]) def test__DAG_add_dependency_detect_cycle2(): A,", "# ---------------------------------------------------------------------------- # Helper Functions # ---------------------------------------------------------------------------- def get_two_tasks(): return (Task(\"A.py\", env=\"test-env\"), Task(\"B.py\",", "is not None assert dag.get_upstream()[B] == {A} assert dag.get_upstream() == {B: {A}}, \"Task", "assert dag.tasks == {B} def test__DAG_remove_tasks(): A, B = get_two_tasks() C = Task(\"C.py\")", "except in compliance with the License. ## You may obtain a copy of", "## --------------------------------------------------------------------------- \"\"\"Unit tests for the DAG class.\"\"\" from collections import defaultdict import", "= Task(\"C.py\") dag = DAG() dag.add_tasks({A, B}) assert dag.tasks == {A, B}, \"Test", "== {A} def test__DAG_get_sinks(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A)", "env=\"test-env\"), Task(\"B.py\", env=\"test-env\")) # ---------------------------------------------------------------------------- # DAG magic methods # ---------------------------------------------------------------------------- def test__validate_dependency():", "--------------------------------------------------------------------------- ## Copyright 2019 Dynatrace LLC ## ## Licensed under the Apache License,", "pytest from alyeska.compose import Task, DAG from alyeska.compose.exceptions import CyclicGraphError from test_compose_globals import", "== {A, B}, \"Test Tasks were not added to the DAG\" dag.add_tasks(C) assert", "assert dag.get_upstream() is not None assert dag.get_upstream()[B] 
== {A} assert dag.get_upstream() == {B:", "depends_on=B) def test__DAG_from_yaml(): DAG.from_yaml(COMPOSE_SMALL) with pytest.raises(CyclicGraphError): DAG.from_yaml(COMPOSE_CYCLE) dag = DAG.from_yaml(COMPOSE_TRICKY) assert len(dag.tasks) >", "this file except in compliance with the License. ## You may obtain a", "= get_two_tasks() dag = DAG() dag.add_dependency(B, A) assert dag._edges[A] == set([B]) def test__DAG_add_dependency_detect_cycle():", "alyeska.compose import Task, DAG from alyeska.compose.exceptions import CyclicGraphError from test_compose_globals import ( COMPOSE_SMALL,", "BASIS, ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "len(dag.tasks) == 2 dag = DAG(downstream_dependencies={make_tea: drink_tea}) assert len(dag.tasks) == 2 def test__DAG_repr():", "{B: {A}}, \"Task A is not upstream\" def test__DAG_get_sources(): A, B = get_two_tasks()", "is not None assert dag.get_sources() == {A} def test__DAG_get_sinks(): A, B = get_two_tasks()", "## ## http://www.apache.org/licenses/LICENSE-2.0 ## ## Unless required by applicable law or agreed to", "== 1 dag = DAG(tasks={drink_tea, make_tea}) assert len(dag.tasks) == 2 dag = DAG(upstream_dependencies={drink_tea:", "License for the specific language governing permissions and ## limitations under the License.", "C = Task(\"C.py\") dag = DAG() dag.add_tasks({A, B}) assert dag.tasks == {A, B},", "len(dag.tasks) == 2 def test__DAG_repr(): p = pathlib.Path(\"make_tea.py\") make_tea = Task(p, \"test-env\") dag", "{B} assert dag.get_downstream() == {A: {B}}, \"Task B is not downstream\" def test__DAG_get_upstream():", "not added to the DAG\" dag.add_tasks(C) assert dag.tasks == {A, B, C} def", "B = get_two_tasks() C = Task(\"C.py\", env=\"test-env\") dag = DAG() dag.add_dependencies({B: A}) assert", "= DAG(tasks={drink_tea, make_tea}) assert len(dag.tasks) == 2 dag = DAG(upstream_dependencies={drink_tea: make_tea}) assert len(dag.tasks)", "== set([B]) def 
test__DAG_add_dependency_detect_cycle(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, A)", "## --------------------------------------------------------------------------- ## Copyright 2019 Dynatrace LLC ## ## Licensed under the Apache", "def test__DAG_get_upstream(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert dag.get_upstream()", "== {C} dag.remove_tasks(C) assert dag.tasks == set() # ---------------------------------------------------------------------------- # add dependencies #", "def test__DAG_add_dependency(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, A) assert dag._edges[A]", "License. ## You may obtain a copy of the License at ## ##", "dag.add_tasks(C) assert dag.tasks == {A, B, C} def test__DAG_remove_task(): A, B = get_two_tasks()", "C = Task(\"C.py\") dag = DAG() dag.add_tasks({A, B, C}) dag.remove_tasks({A, B}) assert dag.tasks", "DAG() dag.add_task(make_tea) assert repr(dag) == \"\".join([\"DAG({Task(\", p.resolve().as_posix(), \")})\"]) # ---------------------------------------------------------------------------- # DAG.tasks #", "by applicable law or agreed to in writing, software ## distributed under the", "added to the DAG\" def test__DAG_add_tasks(): A, B = get_two_tasks() C = Task(\"C.py\")", "dag = DAG() dag.add_dependency(B, A) with pytest.raises(CyclicGraphError): dag.add_dependency(A, B) def test__DAG_add_dependencies(): A, B", "A) with pytest.raises(CyclicGraphError): dag.add_dependency(A, B) def test__DAG_add_dependencies(): A, B = get_two_tasks() C =", "A is not upstream\" def test__DAG_get_sources(): A, B = get_two_tasks() dag = DAG()", "set([B]) dag = DAG() dag.add_dependencies({C: {A, B}}) assert dag._edges[A] == set([C]) assert dag._edges[B]", "get_two_tasks() dag = DAG() dag.add_dependency(B, A) with pytest.raises(CyclicGraphError): dag.add_dependency(A, B) def test__DAG_add_dependencies(): A,", "the DAG class.\"\"\" from collections import defaultdict import os import 
pathlib import pytest", "for the specific language governing permissions and ## limitations under the License. ##", "DAG from alyeska.compose.exceptions import CyclicGraphError from test_compose_globals import ( COMPOSE_SMALL, COMPOSE_BIG, COMPOSE_CYCLE, COMPOSE_TRICKY,", "B: A, C: B}) # ---------------------------------------------------------------------------- # methods # ---------------------------------------------------------------------------- def test__DAG_get_downstream(): A,", "from alyeska.compose.exceptions import CyclicGraphError from test_compose_globals import ( COMPOSE_SMALL, COMPOSE_BIG, COMPOSE_CYCLE, COMPOSE_TRICKY, )", "DAG(tasks=make_tea) assert len(dag.tasks) == 1 dag = DAG(tasks={drink_tea, make_tea}) assert len(dag.tasks) == 2", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ## See the", "as cyclic\" with pytest.raises(CyclicGraphError): dag.add_dependency(A, depends_on=B) def test__DAG_from_yaml(): DAG.from_yaml(COMPOSE_SMALL) with pytest.raises(CyclicGraphError): DAG.from_yaml(COMPOSE_CYCLE) dag", "= DAG() dag.add_dependency(B, depends_on=A) assert not dag.is_cyclic(), \"acyclic graph idenfied as cyclic\" with", "= Task(\"drink_tea.py\", \"test-env\") with pytest.raises(TypeError): DAG.validate_dependency([1, 2, 3]) with pytest.raises(ValueError): DAG.validate_dependency(defaultdict(set, {make_tea: [drink_tea]}))", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
## See", "Task(\"C.py\") dag = DAG() dag.add_tasks({A, B}) assert dag.tasks == {A, B}, \"Test Tasks", "dag = DAG() dag.add_tasks({A, B}) assert dag.tasks == {A, B}, \"Test Tasks were", "# ---------------------------------------------------------------------------- # add dependencies # ---------------------------------------------------------------------------- def test__DAG_add_dependency(): A, B = get_two_tasks()", "dag = DAG() dag.add_dependency(B, depends_on=A) assert dag.get_sinks() is not None assert dag.get_sinks() ==", "dag = DAG() dag.add_dependencies({C: {A, B}}) assert dag._edges[A] == set([C]) assert dag._edges[B] ==", "with dependencies make_tea = Task(\"make_tea.py\", \"test-env\") drink_tea = Task(\"drink_tea.py\", \"test-env\") dag = DAG(tasks=make_tea)", "dag.add_task(make_tea) assert repr(dag) == \"\".join([\"DAG({Task(\", p.resolve().as_posix(), \")})\"]) # ---------------------------------------------------------------------------- # DAG.tasks # ----------------------------------------------------------------------------", "## distributed under the License is distributed on an \"AS IS\" BASIS, ##", "assert dag._edges[A] == set([C]) assert dag._edges[B] == set([C]) def test__DAG_add_dependency_detect_cycle2(): A, B =", "== {B} def test__DAG_is_cyclic(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A)", "either express or implied. 
## See the License for the specific language governing", "You may obtain a copy of the License at ## ## http://www.apache.org/licenses/LICENSE-2.0 ##", "COMPOSE_CYCLE, COMPOSE_TRICKY, ) # ---------------------------------------------------------------------------- # Helper Functions # ---------------------------------------------------------------------------- def get_two_tasks(): return", "p.resolve().as_posix(), \")})\"]) # ---------------------------------------------------------------------------- # DAG.tasks # ---------------------------------------------------------------------------- def test__DAG_add_task(): A, B =", "dag = DAG(upstream_dependencies={drink_tea: make_tea}) assert len(dag.tasks) == 2 dag = DAG(downstream_dependencies={make_tea: drink_tea}) assert", "of the License at ## ## http://www.apache.org/licenses/LICENSE-2.0 ## ## Unless required by applicable", "assert dag.get_sinks() is not None assert dag.get_sinks() == {B} def test__DAG_is_cyclic(): A, B", "get_two_tasks() C = Task(\"C.py\", env=\"test-env\") dag = DAG() dag.add_dependencies({B: A}) assert dag._edges[A] ==", "set([C]) assert dag._edges[B] == set([C]) def test__DAG_add_dependency_detect_cycle2(): A, B = get_two_tasks() C =", "get_two_tasks() C = Task(\"C.py\", env=\"test-env\") dag = DAG() with pytest.raises(CyclicGraphError): dag.add_dependencies({A: C, B:", "dag._edges[B] == set([C]) def test__DAG_add_dependency_detect_cycle2(): A, B = get_two_tasks() C = Task(\"C.py\", env=\"test-env\")", "## You may obtain a copy of the License at ## ## http://www.apache.org/licenses/LICENSE-2.0", "dag = DAG() dag.add_task(A) assert dag.tasks == {A}, \"Test Task was not added", "= DAG() with pytest.raises(CyclicGraphError): dag.add_dependencies({A: C, B: A, C: B}) # ---------------------------------------------------------------------------- #", "test_compose_globals import ( COMPOSE_SMALL, COMPOSE_BIG, COMPOSE_CYCLE, COMPOSE_TRICKY, ) # 
---------------------------------------------------------------------------- # Helper Functions", "or agreed to in writing, software ## distributed under the License is distributed", "B = get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert dag.get_upstream() is not None", "cyclic\" with pytest.raises(CyclicGraphError): dag.add_dependency(A, depends_on=B) def test__DAG_from_yaml(): DAG.from_yaml(COMPOSE_SMALL) with pytest.raises(CyclicGraphError): DAG.from_yaml(COMPOSE_CYCLE) dag =", "pytest.raises(ValueError): DAG.validate_dependency({Task: {1, 2, 3}}) DAG.validate_dependency({make_tea: drink_tea}) DAG.validate_dependency({make_tea: {drink_tea, drink_tea}}) def test__DAG_init(): DAG()", "def test__DAG_repr(): p = pathlib.Path(\"make_tea.py\") make_tea = Task(p, \"test-env\") dag = DAG() dag.add_task(make_tea)", "env=\"test-env\") dag = DAG() dag.add_dependencies({B: A}) assert dag._edges[A] == set([B]) dag = DAG()", "pytest.raises(CyclicGraphError): dag.add_dependency(A, depends_on=B) def test__DAG_from_yaml(): DAG.from_yaml(COMPOSE_SMALL) with pytest.raises(CyclicGraphError): DAG.from_yaml(COMPOSE_CYCLE) dag = DAG.from_yaml(COMPOSE_TRICKY) assert", "# ---------------------------------------------------------------------------- def test__validate_dependency(): make_tea = Task(\"make_tea.py\", \"test-env\") drink_tea = Task(\"drink_tea.py\", \"test-env\") with", "the Apache License, Version 2.0 (the \"License\"); ## you may not use this", "DAG class.\"\"\" from collections import defaultdict import os import pathlib import pytest from", "A, B = get_two_tasks() C = Task(\"C.py\", env=\"test-env\") dag = DAG() dag.add_dependencies({B: A})", "A, C: B}) # ---------------------------------------------------------------------------- # methods # ---------------------------------------------------------------------------- def test__DAG_get_downstream(): A, B", "dag = DAG() dag.add_dependency(B, depends_on=A) assert dag.get_downstream() is not None assert 
dag.get_downstream()[A] ==", "B}) dag.remove_task(A) assert dag.tasks == {B} def test__DAG_remove_tasks(): A, B = get_two_tasks() C", "assert dag.get_downstream() == {A: {B}}, \"Task B is not downstream\" def test__DAG_get_upstream(): A,", "B}}) assert dag._edges[A] == set([C]) assert dag._edges[B] == set([C]) def test__DAG_add_dependency_detect_cycle2(): A, B", "import CyclicGraphError from test_compose_globals import ( COMPOSE_SMALL, COMPOSE_BIG, COMPOSE_CYCLE, COMPOSE_TRICKY, ) # ----------------------------------------------------------------------------", "= get_two_tasks() C = Task(\"C.py\", env=\"test-env\") dag = DAG() dag.add_dependencies({B: A}) assert dag._edges[A]", "License at ## ## http://www.apache.org/licenses/LICENSE-2.0 ## ## Unless required by applicable law or", "the License is distributed on an \"AS IS\" BASIS, ## WITHOUT WARRANTIES OR", "get_two_tasks() dag = DAG() dag.add_tasks({A, B}) dag.remove_task(A) assert dag.tasks == {B} def test__DAG_remove_tasks():", "Apache License, Version 2.0 (the \"License\"); ## you may not use this file", "A, B = get_two_tasks() C = Task(\"C.py\") dag = DAG() dag.add_tasks({A, B, C})", "B = get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert dag.get_sinks() is not None", "-*- ## --------------------------------------------------------------------------- ## Copyright 2019 Dynatrace LLC ## ## Licensed under the", "http://www.apache.org/licenses/LICENSE-2.0 ## ## Unless required by applicable law or agreed to in writing,", "not None assert dag.get_upstream()[B] == {A} assert dag.get_upstream() == {B: {A}}, \"Task A", "methods # ---------------------------------------------------------------------------- def test__validate_dependency(): make_tea = Task(\"make_tea.py\", \"test-env\") drink_tea = Task(\"drink_tea.py\", \"test-env\")", "dag.add_tasks({A, B, C}) dag.remove_tasks({A, B}) assert dag.tasks == {C} dag.remove_tasks(C) assert dag.tasks ==", "compliance with the License. 
## You may obtain a copy of the License", "B = get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert dag.get_sources() is not None", "C = Task(\"C.py\", env=\"test-env\") dag = DAG() with pytest.raises(CyclicGraphError): dag.add_dependencies({A: C, B: A,", "dag.add_dependency(B, depends_on=A) assert not dag.is_cyclic(), \"acyclic graph idenfied as cyclic\" with pytest.raises(CyclicGraphError): dag.add_dependency(A,", "make_tea = Task(\"make_tea.py\", \"test-env\") drink_tea = Task(\"drink_tea.py\", \"test-env\") with pytest.raises(TypeError): DAG.validate_dependency([1, 2, 3])", "drink_tea = Task(\"drink_tea.py\", \"test-env\") dag = DAG(tasks=make_tea) assert len(dag.tasks) == 1 dag =", "1 dag = DAG(tasks={drink_tea, make_tea}) assert len(dag.tasks) == 2 dag = DAG(upstream_dependencies={drink_tea: make_tea})", "under the Apache License, Version 2.0 (the \"License\"); ## you may not use", "\"Task B is not downstream\" def test__DAG_get_upstream(): A, B = get_two_tasks() dag =", "from alyeska.compose import Task, DAG from alyeska.compose.exceptions import CyclicGraphError from test_compose_globals import (", "is not upstream\" def test__DAG_get_sources(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B,", "file except in compliance with the License. 
## You may obtain a copy", "== set([B]) dag = DAG() dag.add_dependencies({C: {A, B}}) assert dag._edges[A] == set([C]) assert", "= DAG() dag.add_tasks({A, B}) dag.remove_task(A) assert dag.tasks == {B} def test__DAG_remove_tasks(): A, B", "DAG() dag.add_dependencies({C: {A, B}}) assert dag._edges[A] == set([C]) assert dag._edges[B] == set([C]) def", "magic methods # ---------------------------------------------------------------------------- def test__validate_dependency(): make_tea = Task(\"make_tea.py\", \"test-env\") drink_tea = Task(\"drink_tea.py\",", "dag.add_dependency(B, depends_on=A) assert dag.get_upstream() is not None assert dag.get_upstream()[B] == {A} assert dag.get_upstream()", "def test__DAG_get_sinks(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert dag.get_sinks()", "Task was not added to the DAG\" def test__DAG_add_tasks(): A, B = get_two_tasks()", "dag.get_downstream() == {A: {B}}, \"Task B is not downstream\" def test__DAG_get_upstream(): A, B", "import Task, DAG from alyeska.compose.exceptions import CyclicGraphError from test_compose_globals import ( COMPOSE_SMALL, COMPOSE_BIG,", "dag = DAG() with pytest.raises(CyclicGraphError): dag.add_dependencies({A: C, B: A, C: B}) # ----------------------------------------------------------------------------", "See the License for the specific language governing permissions and ## limitations under", "obtain a copy of the License at ## ## http://www.apache.org/licenses/LICENSE-2.0 ## ## Unless", "def test__DAG_add_dependency_detect_cycle2(): A, B = get_two_tasks() C = Task(\"C.py\", env=\"test-env\") dag = DAG()", "dag._edges[A] == set([B]) def test__DAG_add_dependency_detect_cycle(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B,", "{1, 2, 3}}) DAG.validate_dependency({make_tea: drink_tea}) DAG.validate_dependency({make_tea: {drink_tea, drink_tea}}) def test__DAG_init(): DAG() # init", "= get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert 
dag.get_sinks() is not None assert", "dag.remove_task(A) assert dag.tasks == {B} def test__DAG_remove_tasks(): A, B = get_two_tasks() C =", "Task(\"C.py\", env=\"test-env\") dag = DAG() with pytest.raises(CyclicGraphError): dag.add_dependencies({A: C, B: A, C: B})", "DAG() dag.add_tasks({A, B, C}) dag.remove_tasks({A, B}) assert dag.tasks == {C} dag.remove_tasks(C) assert dag.tasks", "dag.add_dependency(A, depends_on=B) def test__DAG_from_yaml(): DAG.from_yaml(COMPOSE_SMALL) with pytest.raises(CyclicGraphError): DAG.from_yaml(COMPOSE_CYCLE) dag = DAG.from_yaml(COMPOSE_TRICKY) assert len(dag.tasks)", "A, B = get_two_tasks() dag = DAG() dag.add_task(A) assert dag.tasks == {A}, \"Test", "DAG(downstream_dependencies={make_tea: drink_tea}) assert len(dag.tasks) == 2 def test__DAG_repr(): p = pathlib.Path(\"make_tea.py\") make_tea =", "not dag.is_cyclic(), \"acyclic graph idenfied as cyclic\" with pytest.raises(CyclicGraphError): dag.add_dependency(A, depends_on=B) def test__DAG_from_yaml():", "dag.get_upstream()[B] == {A} assert dag.get_upstream() == {B: {A}}, \"Task A is not upstream\"", "# ---------------------------------------------------------------------------- def test__DAG_get_downstream(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A)", "DAG(tasks={drink_tea, make_tea}) assert len(dag.tasks) == 2 dag = DAG(upstream_dependencies={drink_tea: make_tea}) assert len(dag.tasks) ==", "import ( COMPOSE_SMALL, COMPOSE_BIG, COMPOSE_CYCLE, COMPOSE_TRICKY, ) # ---------------------------------------------------------------------------- # Helper Functions #", "dag.tasks == {A, B}, \"Test Tasks were not added to the DAG\" dag.add_tasks(C)", "set() # ---------------------------------------------------------------------------- # add dependencies # ---------------------------------------------------------------------------- def test__DAG_add_dependency(): A, B =", "= get_two_tasks() C = Task(\"C.py\") dag = DAG() dag.add_tasks({A, B, C}) 
dag.remove_tasks({A, B})", "None assert dag.get_upstream()[B] == {A} assert dag.get_upstream() == {B: {A}}, \"Task A is", "import pytest from alyeska.compose import Task, DAG from alyeska.compose.exceptions import CyclicGraphError from test_compose_globals", "License is distributed on an \"AS IS\" BASIS, ## WITHOUT WARRANTIES OR CONDITIONS", "dag = DAG() dag.add_dependency(B, depends_on=A) assert dag.get_upstream() is not None assert dag.get_upstream()[B] ==", "---------------------------------------------------------------------------- def test__DAG_get_downstream(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert", "{B} def test__DAG_is_cyclic(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert", "{A, B}, \"Test Tasks were not added to the DAG\" dag.add_tasks(C) assert dag.tasks", "import pathlib import pytest from alyeska.compose import Task, DAG from alyeska.compose.exceptions import CyclicGraphError", "= DAG() dag.add_tasks({A, B}) assert dag.tasks == {A, B}, \"Test Tasks were not", "DAG() dag.add_dependency(B, depends_on=A) assert dag.get_upstream() is not None assert dag.get_upstream()[B] == {A} assert", "were not added to the DAG\" dag.add_tasks(C) assert dag.tasks == {A, B, C}", "drink_tea}) DAG.validate_dependency({make_tea: {drink_tea, drink_tea}}) def test__DAG_init(): DAG() # init with dependencies make_tea =", "B}) assert dag.tasks == {A, B}, \"Test Tasks were not added to the", "Licensed under the Apache License, Version 2.0 (the \"License\"); ## you may not", "A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, A) assert dag._edges[A] == set([B])", "= get_two_tasks() dag = DAG() dag.add_dependency(B, A) with pytest.raises(CyclicGraphError): dag.add_dependency(A, B) def test__DAG_add_dependencies():", "A}) assert dag._edges[A] == set([B]) dag = DAG() dag.add_dependencies({C: {A, B}}) assert dag._edges[A]", "2, 3}}) DAG.validate_dependency({make_tea: drink_tea}) 
DAG.validate_dependency({make_tea: {drink_tea, drink_tea}}) def test__DAG_init(): DAG() # init with", "== {A, B, C} def test__DAG_remove_task(): A, B = get_two_tasks() dag = DAG()", "Task(\"make_tea.py\", \"test-env\") drink_tea = Task(\"drink_tea.py\", \"test-env\") with pytest.raises(TypeError): DAG.validate_dependency([1, 2, 3]) with pytest.raises(ValueError):", "def test__DAG_get_downstream(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert dag.get_downstream()", "assert dag._edges[A] == set([B]) dag = DAG() dag.add_dependencies({C: {A, B}}) assert dag._edges[A] ==", "---------------------------------------------------------------------------- def test__validate_dependency(): make_tea = Task(\"make_tea.py\", \"test-env\") drink_tea = Task(\"drink_tea.py\", \"test-env\") with pytest.raises(TypeError):", "= get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert dag.get_sources() is not None assert", "## ## Unless required by applicable law or agreed to in writing, software", "DAG.validate_dependency({make_tea: {drink_tea, drink_tea}}) def test__DAG_init(): DAG() # init with dependencies make_tea = Task(\"make_tea.py\",", "dag = DAG(downstream_dependencies={make_tea: drink_tea}) assert len(dag.tasks) == 2 def test__DAG_repr(): p = pathlib.Path(\"make_tea.py\")", "make_tea = Task(\"make_tea.py\", \"test-env\") drink_tea = Task(\"drink_tea.py\", \"test-env\") dag = DAG(tasks=make_tea) assert len(dag.tasks)", "dag.remove_tasks(C) assert dag.tasks == set() # ---------------------------------------------------------------------------- # add dependencies # ---------------------------------------------------------------------------- def", "depends_on=A) assert dag.get_downstream() is not None assert dag.get_downstream()[A] == {B} assert dag.get_downstream() ==", "was not added to the DAG\" def test__DAG_add_tasks(): A, B = get_two_tasks() C", "dag.get_sinks() == {B} def test__DAG_is_cyclic(): A, B = get_two_tasks() dag = 
DAG() dag.add_dependency(B,", "A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert dag.get_sinks() is not", "# init with dependencies make_tea = Task(\"make_tea.py\", \"test-env\") drink_tea = Task(\"drink_tea.py\", \"test-env\") dag", "not upstream\" def test__DAG_get_sources(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A)", "class.\"\"\" from collections import defaultdict import os import pathlib import pytest from alyeska.compose", "dag.tasks == set() # ---------------------------------------------------------------------------- # add dependencies # ---------------------------------------------------------------------------- def test__DAG_add_dependency(): A,", "governing permissions and ## limitations under the License. ## --------------------------------------------------------------------------- \"\"\"Unit tests for", "---------------------------------------------------------------------------- # methods # ---------------------------------------------------------------------------- def test__DAG_get_downstream(): A, B = get_two_tasks() dag =", "is not downstream\" def test__DAG_get_upstream(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B,", "an \"AS IS\" BASIS, ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "B = get_two_tasks() C = Task(\"C.py\", env=\"test-env\") dag = DAG() with pytest.raises(CyclicGraphError): dag.add_dependencies({A:", "repr(dag) == \"\".join([\"DAG({Task(\", p.resolve().as_posix(), \")})\"]) # ---------------------------------------------------------------------------- # DAG.tasks # ---------------------------------------------------------------------------- def test__DAG_add_task():", "added to the DAG\" dag.add_tasks(C) assert dag.tasks == {A, B, C} def test__DAG_remove_task():", "{A}, \"Test Task was not added to the DAG\" def test__DAG_add_tasks(): A, B", "---------------------------------------------------------------------------- # Helper Functions # 
---------------------------------------------------------------------------- def get_two_tasks(): return (Task(\"A.py\", env=\"test-env\"), Task(\"B.py\", env=\"test-env\"))", "the License. ## You may obtain a copy of the License at ##", "may not use this file except in compliance with the License. ## You", "set([C]) def test__DAG_add_dependency_detect_cycle2(): A, B = get_two_tasks() C = Task(\"C.py\", env=\"test-env\") dag =", "dag.add_dependencies({B: A}) assert dag._edges[A] == set([B]) dag = DAG() dag.add_dependencies({C: {A, B}}) assert", "pytest.raises(ValueError): DAG.validate_dependency(defaultdict(set, {make_tea: [drink_tea]})) with pytest.raises(ValueError): DAG.validate_dependency({Task: {1, 2, 3}}) DAG.validate_dependency({make_tea: drink_tea}) DAG.validate_dependency({make_tea:", "in writing, software ## distributed under the License is distributed on an \"AS", "return (Task(\"A.py\", env=\"test-env\"), Task(\"B.py\", env=\"test-env\")) # ---------------------------------------------------------------------------- # DAG magic methods # ----------------------------------------------------------------------------", "at ## ## http://www.apache.org/licenses/LICENSE-2.0 ## ## Unless required by applicable law or agreed", "\"Test Task was not added to the DAG\" def test__DAG_add_tasks(): A, B =", "## limitations under the License. 
## --------------------------------------------------------------------------- \"\"\"Unit tests for the DAG class.\"\"\"", "not None assert dag.get_downstream()[A] == {B} assert dag.get_downstream() == {A: {B}}, \"Task B", "get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert dag.get_sources() is not None assert dag.get_sources()", "Functions # ---------------------------------------------------------------------------- def get_two_tasks(): return (Task(\"A.py\", env=\"test-env\"), Task(\"B.py\", env=\"test-env\")) # ---------------------------------------------------------------------------- #", "DAG() dag.add_dependency(B, depends_on=A) assert dag.get_sources() is not None assert dag.get_sources() == {A} def", "B}) # ---------------------------------------------------------------------------- # methods # ---------------------------------------------------------------------------- def test__DAG_get_downstream(): A, B = get_two_tasks()", "A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert dag.get_downstream() is not", "the specific language governing permissions and ## limitations under the License. ## ---------------------------------------------------------------------------", "dag = DAG() dag.add_dependency(B, depends_on=A) assert dag.get_sources() is not None assert dag.get_sources() ==", "( COMPOSE_SMALL, COMPOSE_BIG, COMPOSE_CYCLE, COMPOSE_TRICKY, ) # ---------------------------------------------------------------------------- # Helper Functions # ----------------------------------------------------------------------------", "get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert not dag.is_cyclic(), \"acyclic graph idenfied as", "## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
##", "--------------------------------------------------------------------------- \"\"\"Unit tests for the DAG class.\"\"\" from collections import defaultdict import os", "test__DAG_repr(): p = pathlib.Path(\"make_tea.py\") make_tea = Task(p, \"test-env\") dag = DAG() dag.add_task(make_tea) assert", "LLC ## ## Licensed under the Apache License, Version 2.0 (the \"License\"); ##", "def test__DAG_is_cyclic(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert not", "= get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert not dag.is_cyclic(), \"acyclic graph idenfied", "== set([C]) assert dag._edges[B] == set([C]) def test__DAG_add_dependency_detect_cycle2(): A, B = get_two_tasks() C", "None assert dag.get_downstream()[A] == {B} assert dag.get_downstream() == {A: {B}}, \"Task B is", "is distributed on an \"AS IS\" BASIS, ## WITHOUT WARRANTIES OR CONDITIONS OF", "drink_tea}}) def test__DAG_init(): DAG() # init with dependencies make_tea = Task(\"make_tea.py\", \"test-env\") drink_tea", "= get_two_tasks() C = Task(\"C.py\") dag = DAG() dag.add_tasks({A, B}) assert dag.tasks ==", "B = get_two_tasks() dag = DAG() dag.add_task(A) assert dag.tasks == {A}, \"Test Task", "make_tea = Task(p, \"test-env\") dag = DAG() dag.add_task(make_tea) assert repr(dag) == \"\".join([\"DAG({Task(\", p.resolve().as_posix(),", "= DAG() dag.add_dependency(B, A) assert dag._edges[A] == set([B]) def test__DAG_add_dependency_detect_cycle(): A, B =", "copy of the License at ## ## http://www.apache.org/licenses/LICENSE-2.0 ## ## Unless required by", "and ## limitations under the License. 
## --------------------------------------------------------------------------- \"\"\"Unit tests for the DAG", "\"acyclic graph idenfied as cyclic\" with pytest.raises(CyclicGraphError): dag.add_dependency(A, depends_on=B) def test__DAG_from_yaml(): DAG.from_yaml(COMPOSE_SMALL) with", "Helper Functions # ---------------------------------------------------------------------------- def get_two_tasks(): return (Task(\"A.py\", env=\"test-env\"), Task(\"B.py\", env=\"test-env\")) # ----------------------------------------------------------------------------", "DAG() dag.add_task(A) assert dag.tasks == {A}, \"Test Task was not added to the", "under the License. ## --------------------------------------------------------------------------- \"\"\"Unit tests for the DAG class.\"\"\" from collections", "== 2 dag = DAG(upstream_dependencies={drink_tea: make_tea}) assert len(dag.tasks) == 2 dag = DAG(downstream_dependencies={make_tea:", "---------------------------------------------------------------------------- # DAG magic methods # ---------------------------------------------------------------------------- def test__validate_dependency(): make_tea = Task(\"make_tea.py\", \"test-env\")", "under the License is distributed on an \"AS IS\" BASIS, ## WITHOUT WARRANTIES", "C} def test__DAG_remove_task(): A, B = get_two_tasks() dag = DAG() dag.add_tasks({A, B}) dag.remove_task(A)", "Dynatrace LLC ## ## Licensed under the Apache License, Version 2.0 (the \"License\");", "## ## Licensed under the Apache License, Version 2.0 (the \"License\"); ## you", "get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert dag.get_downstream() is not None assert dag.get_downstream()[A]", "dag.tasks == {A}, \"Test Task was not added to the DAG\" def test__DAG_add_tasks():", "tests for the DAG class.\"\"\" from collections import defaultdict import os import pathlib", "# add dependencies # ---------------------------------------------------------------------------- def 
test__DAG_add_dependency(): A, B = get_two_tasks() dag =", "def test__DAG_init(): DAG() # init with dependencies make_tea = Task(\"make_tea.py\", \"test-env\") drink_tea =", "Task(\"C.py\") dag = DAG() dag.add_tasks({A, B, C}) dag.remove_tasks({A, B}) assert dag.tasks == {C}", "DAG.validate_dependency([1, 2, 3]) with pytest.raises(ValueError): DAG.validate_dependency(defaultdict(set, {make_tea: [drink_tea]})) with pytest.raises(ValueError): DAG.validate_dependency({Task: {1, 2,", "A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert dag.get_upstream() is not", "Task(\"make_tea.py\", \"test-env\") drink_tea = Task(\"drink_tea.py\", \"test-env\") dag = DAG(tasks=make_tea) assert len(dag.tasks) == 1", "{B}}, \"Task B is not downstream\" def test__DAG_get_upstream(): A, B = get_two_tasks() dag", "= DAG() dag.add_dependency(B, A) with pytest.raises(CyclicGraphError): dag.add_dependency(A, B) def test__DAG_add_dependencies(): A, B =", "{C} dag.remove_tasks(C) assert dag.tasks == set() # ---------------------------------------------------------------------------- # add dependencies # ----------------------------------------------------------------------------", "for the DAG class.\"\"\" from collections import defaultdict import os import pathlib import", "not downstream\" def test__DAG_get_upstream(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A)", "= DAG() dag.add_task(make_tea) assert repr(dag) == \"\".join([\"DAG({Task(\", p.resolve().as_posix(), \")})\"]) # ---------------------------------------------------------------------------- # DAG.tasks", "== {A}, \"Test Task was not added to the DAG\" def test__DAG_add_tasks(): A,", "# ---------------------------------------------------------------------------- # methods # ---------------------------------------------------------------------------- def test__DAG_get_downstream(): A, B = get_two_tasks() dag", "use this file except in compliance with the License. 
## You may obtain", "limitations under the License. ## --------------------------------------------------------------------------- \"\"\"Unit tests for the DAG class.\"\"\" from", "defaultdict import os import pathlib import pytest from alyeska.compose import Task, DAG from", "3}}) DAG.validate_dependency({make_tea: drink_tea}) DAG.validate_dependency({make_tea: {drink_tea, drink_tea}}) def test__DAG_init(): DAG() # init with dependencies", "dag = DAG() dag.add_dependency(B, A) assert dag._edges[A] == set([B]) def test__DAG_add_dependency_detect_cycle(): A, B", "the License at ## ## http://www.apache.org/licenses/LICENSE-2.0 ## ## Unless required by applicable law", "dependencies # ---------------------------------------------------------------------------- def test__DAG_add_dependency(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B,", "def test__DAG_add_dependencies(): A, B = get_two_tasks() C = Task(\"C.py\", env=\"test-env\") dag = DAG()", "---------------------------------------------------------------------------- # DAG.tasks # ---------------------------------------------------------------------------- def test__DAG_add_task(): A, B = get_two_tasks() dag =", "# DAG magic methods # ---------------------------------------------------------------------------- def test__validate_dependency(): make_tea = Task(\"make_tea.py\", \"test-env\") drink_tea", "assert dag.tasks == set() # ---------------------------------------------------------------------------- # add dependencies # ---------------------------------------------------------------------------- def test__DAG_add_dependency():", "== {B} def test__DAG_remove_tasks(): A, B = get_two_tasks() C = Task(\"C.py\") dag =", "Task(\"C.py\", env=\"test-env\") dag = DAG() dag.add_dependencies({B: A}) assert dag._edges[A] == set([B]) dag =", "the License. 
## --------------------------------------------------------------------------- \"\"\"Unit tests for the DAG class.\"\"\" from collections import", "graph idenfied as cyclic\" with pytest.raises(CyclicGraphError): dag.add_dependency(A, depends_on=B) def test__DAG_from_yaml(): DAG.from_yaml(COMPOSE_SMALL) with pytest.raises(CyclicGraphError):", "## you may not use this file except in compliance with the License.", "OF ANY KIND, either express or implied. ## See the License for the", "set([B]) def test__DAG_add_dependency_detect_cycle(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, A) with", "= DAG(tasks=make_tea) assert len(dag.tasks) == 1 dag = DAG(tasks={drink_tea, make_tea}) assert len(dag.tasks) ==", "pytest.raises(TypeError): DAG.validate_dependency([1, 2, 3]) with pytest.raises(ValueError): DAG.validate_dependency(defaultdict(set, {make_tea: [drink_tea]})) with pytest.raises(ValueError): DAG.validate_dependency({Task: {1,", "2.0 (the \"License\"); ## you may not use this file except in compliance", "= Task(p, \"test-env\") dag = DAG() dag.add_task(make_tea) assert repr(dag) == \"\".join([\"DAG({Task(\", p.resolve().as_posix(), \")})\"])", "KIND, either express or implied. 
## See the License for the specific language", "dag.get_sinks() is not None assert dag.get_sinks() == {B} def test__DAG_is_cyclic(): A, B =", "== {A: {B}}, \"Task B is not downstream\" def test__DAG_get_upstream(): A, B =", "distributed on an \"AS IS\" BASIS, ## WITHOUT WARRANTIES OR CONDITIONS OF ANY", "assert dag.tasks == {A, B}, \"Test Tasks were not added to the DAG\"", "\"Task A is not upstream\" def test__DAG_get_sources(): A, B = get_two_tasks() dag =", "depends_on=A) assert dag.get_upstream() is not None assert dag.get_upstream()[B] == {A} assert dag.get_upstream() ==", "dependencies make_tea = Task(\"make_tea.py\", \"test-env\") drink_tea = Task(\"drink_tea.py\", \"test-env\") dag = DAG(tasks=make_tea) assert", "get_two_tasks() dag = DAG() dag.add_dependency(B, A) assert dag._edges[A] == set([B]) def test__DAG_add_dependency_detect_cycle(): A,", "-*- coding: utf-8 -*- ## --------------------------------------------------------------------------- ## Copyright 2019 Dynatrace LLC ## ##", "import defaultdict import os import pathlib import pytest from alyeska.compose import Task, DAG", "env=\"test-env\")) # ---------------------------------------------------------------------------- # DAG magic methods # ---------------------------------------------------------------------------- def test__validate_dependency(): make_tea =", "Tasks were not added to the DAG\" dag.add_tasks(C) assert dag.tasks == {A, B,", "\"License\"); ## you may not use this file except in compliance with the", "implied. ## See the License for the specific language governing permissions and ##", "OR CONDITIONS OF ANY KIND, either express or implied. 
## See the License", "DAG() with pytest.raises(CyclicGraphError): dag.add_dependencies({A: C, B: A, C: B}) # ---------------------------------------------------------------------------- # methods", "dag.tasks == {A, B, C} def test__DAG_remove_task(): A, B = get_two_tasks() dag =", "assert len(dag.tasks) == 2 dag = DAG(downstream_dependencies={make_tea: drink_tea}) assert len(dag.tasks) == 2 def", "DAG() dag.add_dependency(B, depends_on=A) assert not dag.is_cyclic(), \"acyclic graph idenfied as cyclic\" with pytest.raises(CyclicGraphError):", "Task, DAG from alyeska.compose.exceptions import CyclicGraphError from test_compose_globals import ( COMPOSE_SMALL, COMPOSE_BIG, COMPOSE_CYCLE,", "---------------------------------------------------------------------------- def test__DAG_add_task(): A, B = get_two_tasks() dag = DAG() dag.add_task(A) assert dag.tasks", "from collections import defaultdict import os import pathlib import pytest from alyeska.compose import", "test__DAG_add_dependency_detect_cycle2(): A, B = get_two_tasks() C = Task(\"C.py\", env=\"test-env\") dag = DAG() with", "## See the License for the specific language governing permissions and ## limitations", "drink_tea}) assert len(dag.tasks) == 2 def test__DAG_repr(): p = pathlib.Path(\"make_tea.py\") make_tea = Task(p,", "B = get_two_tasks() dag = DAG() dag.add_tasks({A, B}) dag.remove_task(A) assert dag.tasks == {B}", "with the License. ## You may obtain a copy of the License at", "2 def test__DAG_repr(): p = pathlib.Path(\"make_tea.py\") make_tea = Task(p, \"test-env\") dag = DAG()", "not use this file except in compliance with the License. 
## You may", "dag.remove_tasks({A, B}) assert dag.tasks == {C} dag.remove_tasks(C) assert dag.tasks == set() # ----------------------------------------------------------------------------", "B, C} def test__DAG_remove_task(): A, B = get_two_tasks() dag = DAG() dag.add_tasks({A, B})", "assert dag.get_upstream() == {B: {A}}, \"Task A is not upstream\" def test__DAG_get_sources(): A,", "\"test-env\") with pytest.raises(TypeError): DAG.validate_dependency([1, 2, 3]) with pytest.raises(ValueError): DAG.validate_dependency(defaultdict(set, {make_tea: [drink_tea]})) with pytest.raises(ValueError):", "test__validate_dependency(): make_tea = Task(\"make_tea.py\", \"test-env\") drink_tea = Task(\"drink_tea.py\", \"test-env\") with pytest.raises(TypeError): DAG.validate_dependency([1, 2,", "ANY KIND, either express or implied. ## See the License for the specific", "DAG(upstream_dependencies={drink_tea: make_tea}) assert len(dag.tasks) == 2 dag = DAG(downstream_dependencies={make_tea: drink_tea}) assert len(dag.tasks) ==", "dag.get_sources() is not None assert dag.get_sources() == {A} def test__DAG_get_sinks(): A, B =", "\"Test Tasks were not added to the DAG\" dag.add_tasks(C) assert dag.tasks == {A,", "B = get_two_tasks() dag = DAG() dag.add_dependency(B, A) with pytest.raises(CyclicGraphError): dag.add_dependency(A, B) def", "= Task(\"C.py\") dag = DAG() dag.add_tasks({A, B, C}) dag.remove_tasks({A, B}) assert dag.tasks ==", "dag = DAG() dag.add_dependency(B, depends_on=A) assert not dag.is_cyclic(), \"acyclic graph idenfied as cyclic\"", "assert dag.tasks == {C} dag.remove_tasks(C) assert dag.tasks == set() # ---------------------------------------------------------------------------- # add", "on an \"AS IS\" BASIS, ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "with pytest.raises(CyclicGraphError): dag.add_dependency(A, B) def test__DAG_add_dependencies(): A, B = get_two_tasks() C = Task(\"C.py\",", "the License for the specific language governing permissions and ## 
limitations under the", "required by applicable law or agreed to in writing, software ## distributed under", "def test__DAG_from_yaml(): DAG.from_yaml(COMPOSE_SMALL) with pytest.raises(CyclicGraphError): DAG.from_yaml(COMPOSE_CYCLE) dag = DAG.from_yaml(COMPOSE_TRICKY) assert len(dag.tasks) > 0", "test__DAG_is_cyclic(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert not dag.is_cyclic(),", "2 dag = DAG(downstream_dependencies={make_tea: drink_tea}) assert len(dag.tasks) == 2 def test__DAG_repr(): p =", "B = get_two_tasks() C = Task(\"C.py\") dag = DAG() dag.add_tasks({A, B}) assert dag.tasks", "dag.add_tasks({A, B}) dag.remove_task(A) assert dag.tasks == {B} def test__DAG_remove_tasks(): A, B = get_two_tasks()", "with pytest.raises(ValueError): DAG.validate_dependency(defaultdict(set, {make_tea: [drink_tea]})) with pytest.raises(ValueError): DAG.validate_dependency({Task: {1, 2, 3}}) DAG.validate_dependency({make_tea: drink_tea})", "pytest.raises(CyclicGraphError): dag.add_dependency(A, B) def test__DAG_add_dependencies(): A, B = get_two_tasks() C = Task(\"C.py\", env=\"test-env\")", "law or agreed to in writing, software ## distributed under the License is", "in compliance with the License. 
## You may obtain a copy of the", "None assert dag.get_sources() == {A} def test__DAG_get_sinks(): A, B = get_two_tasks() dag =", "---------------------------------------------------------------------------- # add dependencies # ---------------------------------------------------------------------------- def test__DAG_add_dependency(): A, B = get_two_tasks() dag", "may obtain a copy of the License at ## ## http://www.apache.org/licenses/LICENSE-2.0 ## ##", "dag.is_cyclic(), \"acyclic graph idenfied as cyclic\" with pytest.raises(CyclicGraphError): dag.add_dependency(A, depends_on=B) def test__DAG_from_yaml(): DAG.from_yaml(COMPOSE_SMALL)", "# ---------------------------------------------------------------------------- def get_two_tasks(): return (Task(\"A.py\", env=\"test-env\"), Task(\"B.py\", env=\"test-env\")) # ---------------------------------------------------------------------------- # DAG", "dag = DAG() dag.add_dependencies({B: A}) assert dag._edges[A] == set([B]) dag = DAG() dag.add_dependencies({C:", "pathlib import pytest from alyeska.compose import Task, DAG from alyeska.compose.exceptions import CyclicGraphError from", "utf-8 -*- ## --------------------------------------------------------------------------- ## Copyright 2019 Dynatrace LLC ## ## Licensed under", "= get_two_tasks() dag = DAG() dag.add_dependency(B, depends_on=A) assert dag.get_downstream() is not None assert", "get_two_tasks() C = Task(\"C.py\") dag = DAG() dag.add_tasks({A, B}) assert dag.tasks == {A,", "test__DAG_add_dependency(): A, B = get_two_tasks() dag = DAG() dag.add_dependency(B, A) assert dag._edges[A] ==", "== {A} assert dag.get_upstream() == {B: {A}}, \"Task A is not upstream\" def", "dag = DAG(tasks=make_tea) assert len(dag.tasks) == 1 dag = DAG(tasks={drink_tea, make_tea}) assert len(dag.tasks)", "coding: utf-8 -*- ## --------------------------------------------------------------------------- ## Copyright 2019 Dynatrace LLC ## ## Licensed", "= 
DAG(downstream_dependencies={make_tea: drink_tea}) assert len(dag.tasks) == 2 def test__DAG_repr(): p = pathlib.Path(\"make_tea.py\") make_tea", "= get_two_tasks() dag = DAG() dag.add_tasks({A, B}) dag.remove_task(A) assert dag.tasks == {B} def", "dag.get_downstream() is not None assert dag.get_downstream()[A] == {B} assert dag.get_downstream() == {A: {B}},", "== {B: {A}}, \"Task A is not upstream\" def test__DAG_get_sources(): A, B =", "A, B = get_two_tasks() dag = DAG() dag.add_tasks({A, B}) dag.remove_task(A) assert dag.tasks ==", "## Copyright 2019 Dynatrace LLC ## ## Licensed under the Apache License, Version" ]
[ "nproc_ob = nprocs_global / list_prod(nproc_rgrid) nproc_k = 1 inputmap['nproc_k'] = nproc_k inputmap['nproc_ob'] =", "KIND, either express or implied. # See the License for the specific language", "inputmap['nstate'] = n print '[ScaLAPACK]' print 'process grid map (row,col) = ({},{})'.format(nprow,npcol) print", "Unless required by applicable law or agreed to in writing, software # distributed", "if num_rgrid[0] % nprocs_per_node != 0: print '[INFO] num_rgrid[0] % nprocs_per_node is not", "print ' mod(num_rgrid(2),nprocs_rgrid(3)) must be 0' print ' mod(num_rgrid(3),nprocs_rgrid(3)) must be 0' fail_exit()", "get_sl_process_grid_size(n, nprocs) if (nprow*npcol != nprocs): print '[ScaLAPACK] nprow*npcol = {} != {}'.format(nprow*npcol,nprocs)", "' nproc_ob = {}'.format(inputmap['nproc_ob']) print ' nproc_rgrid = {},{},{}'.format(*inputmap['nproc_rgrid']) print ' process_allocation =", "print '/' # FFTE def check_ffte_condition_gridsize(num_rgrid, nprocs_rgrid): y1 = num_rgrid[0] % nprocs_rgrid[1] y2", "dl_pattern.match(s): dims = [] for x in float_pattern.findall(s): dims.append(float(x.replace('d','e'))) inputmap['dl'] = dims elif", "= 'orbital_sequential' # FFTE checking... if inputmap['yn_ffte']: if check_ffte_condition_gridsize(inputmap['num_rgrid'], inputmap['nproc_rgrid']): print '[FFTE]' print", "n = min(len(l1),len(l2)) p = [] for i in range(0,n): p.append(l1[i] / l2[i])", "= int(number_pattern.search(s).group()) elif nproc_rgrid_pattern.match(s): dims = [] for x in number_pattern.findall(s): dims.append(int(x)) inputmap['nproc_rgrid']", "does not support yet'.format(inputmap['theory']) # logical checking... if not 'num_rgrid' in inputmap.keys(): dims", "if inputmap['yn_scalapack']: n = inputmap['nstate'] nprocs = inputmap['nproc_ob'] nprow,npcol = get_sl_process_grid_size(n, nprocs) if", "fail_exit() inputmap = gen_inputlist_map(sys.argv[1]) if inputmap['theory'] != 'dft': print 'Theory {} does not", "checking... 
if not 'num_rgrid' in inputmap.keys(): dims = list_div(inputmap['al'], inputmap['dl']) inputmap['num_rgrid'] = [int(f)", "= [] for i in range(0,n): p.append(l1[i] / l2[i]) return p def list_prod(nlist):", "= max(nb*npcol, n) n = min(n, (nb+1)*npcol) mb,nb = get_sl_blocking_factor(n, nprow, npcol) if", "is suitable to run the application.' print '' print 'NXxNY = {}x{}'.format(sx,sy) print", "nproc_k = 1 inputmap['nproc_k'] = nproc_k inputmap['nproc_ob'] = nproc_ob inputmap['nproc_rgrid'] = nproc_rgrid if", "FFTE.' print 'please check for condition:' print ' mod(num_rgrid(1),nprocs_rgrid(2)) must be 0' print", "this file except in compliance with the License. # You may obtain a", "= False inputmap['process_allocation'] = 'grid_sequential' with open(filename) as f: number_pattern = re.compile(r'\\d+') float_pattern", "return nprow,npcol def get_sl_blocking_factor(n, nprow, npcol): k1 = (n+nprow-1)/nprow k2 = (n+npcol-1)/npcol mb", "0: ny = ii break return [nprocs_per_node, ny, nz] if __name__ == '__main__':", "input list def to_boolean(s): if s.lower() == '\\'y\\'': return True elif s.lower() ==", "to_boolean(yn_pattern.search(s).group()) elif yn_scalapack_pattern.match(s): inputmap['yn_scalapack'] = to_boolean(yn_pattern.search(s).group()) elif proc_alloc_pattern.match(s): inputmap['process_allocation'] = string_pattern.search(s).group().strip('\\'').lower() return inputmap", "in range(ny,1,-1): if check_ffte_condition_gridsize(num_rgrid, [nprocs_per_node, ii, nz]): if nprocs % list_prod([nprocs_per_node, ii, nz])", "False inputmap['process_allocation'] = 'grid_sequential' with open(filename) as f: number_pattern = re.compile(r'\\d+') float_pattern =", "def get_sl_process_grid_size(n, nprocs): npcol = int(math.sqrt(float(nprocs))) npcol = npcol + (npcol % 2)", "x in float_pattern.findall(s): dims.append(float(x.replace('d','e'))) inputmap['dl'] = dims elif num_rgrid_pattern.match(s): dims = [] for", "= nproc_rgrid if nproc_ob >= nprocs_per_node: 
inputmap['process_allocation'] = 'orbital_sequential' # FFTE checking... if", "yet'.format(inputmap['theory']) # logical checking... if not 'num_rgrid' in inputmap.keys(): dims = list_div(inputmap['al'], inputmap['dl'])", "for i in range(0,3): print 'num_rgrid[{}] / nproc_rgrid[{}] = {}'.format(i,i,float(inputmap['num_rgrid'][i])/inputmap['nproc_rgrid'][i]) print '# of", "nproc_rgrid[{}] = {}'.format(i,i,float(inputmap['num_rgrid'][i])/inputmap['nproc_rgrid'][i]) print '# of orbital = {}'.format(float(inputmap['nstate'])/inputmap['nproc_ob']) sx = inputmap['nproc_ob'] *", "= int(number_pattern.search(s).group()) elif nproc_ob_pattern.match(s): inputmap['nproc_ob'] = int(number_pattern.search(s).group()) elif nproc_rgrid_pattern.match(s): dims = [] for", "t = t / 3 for j in range(0,11): if t % 5", "' process_allocation = \\'{}\\''.format(inputmap['process_allocation']) print '/' print '&system' print ' nstate = {}'.format(inputmap['nstate'])", "for x in float_pattern.findall(s): dims.append(float(x.replace('d','e'))) inputmap['dl'] = dims elif num_rgrid_pattern.match(s): dims = []", "ANY KIND, either express or implied. 
# See the License for the specific", "= {} inputmap['nproc_k'] = 1 inputmap['nproc_ob'] = 1 inputmap['nproc_rgrid'] = [1,1,1] inputmap['yn_ffte'] =", "inputmap['yn_ffte']: if check_ffte_condition_gridsize(inputmap['num_rgrid'], inputmap['nproc_rgrid']): print '[FFTE]' print 'num_rgrid and nproc_rgrid are available to", "{}'.format(inputmap['nproc_k']) print ' nproc_ob = {}'.format(inputmap['nproc_ob']) print ' nproc_rgrid = {},{},{}'.format(*inputmap['nproc_rgrid']) print '", "# Copyright 2020 ARTED developers # # Licensed under the Apache License, Version", "l2[i]) return p def list_prod(nlist): n = 1 for i in nlist: n", "(n+nprow-1)/nprow k2 = (n+npcol-1)/npcol mb = min(k1,k2) nb = mb return mb,nb def", "inputmap['num_rgrid'] = [int(f) for f in dims] print '[INFO] num_rgrid constructed = {}'.format(inputmap['num_rgrid'])", "print '[INFO]' print 'product(nproc_rgrid) * nproc_ob * nproc_k /= # of MPI procs", "print 'If you use mesh-torus network type system,' print 'probably, the following node", "== 0: t = t / 3 for j in range(0,11): if t", "= {}'.format(inputmap['nproc_k']) print ' nproc_ob = {}'.format(inputmap['nproc_ob']) print ' nproc_rgrid = {},{},{}'.format(*inputmap['nproc_rgrid']) print", "the License. 
# import math import re import sys def list_div(l1,l2): n =", "nstate_pattern.match(s): inputmap['nstate'] = int(number_pattern.search(s).group()) elif nproc_k_pattern.match(s): inputmap['nproc_k'] = int(number_pattern.search(s).group()) elif nproc_ob_pattern.match(s): inputmap['nproc_ob'] =", "with open(filename) as f: number_pattern = re.compile(r'\\d+') float_pattern = re.compile(r'[+-]?(?:\\d+\\.?\\d*|\\.\\d+)(?:[dD][+-]?\\d+)?') yn_pattern = re.compile(r'\\'[yYnN]\\'')", "= re.compile(r'\\s*theory\\s*=\\s*') nproc_k_pattern = re.compile(r'\\s*nproc_k\\s*=\\s*\\d+') nproc_ob_pattern = re.compile(r'\\s*nproc_ob\\s*=\\s*\\d+') nproc_rgrid_pattern = re.compile(r'\\s*nproc_rgrid\\s*=\\s*\\d+') nstate_pattern =", "inputmap['dl'] = dims elif num_rgrid_pattern.match(s): dims = [] for x in number_pattern.findall(s): dims.append(int(x))", "nz = nz + (nz % 2) for ii in range(0,100): ny =", "nprocs_per_node, num_rgrid, inputmap['yn_ffte']) nproc_ob = nprocs_global / list_prod(nproc_rgrid) nproc_k = 1 inputmap['nproc_k'] =", "= 'grid_sequential' with open(filename) as f: number_pattern = re.compile(r'\\d+') float_pattern = re.compile(r'[+-]?(?:\\d+\\.?\\d*|\\.\\d+)(?:[dD][+-]?\\d+)?') yn_pattern", "else: print '[FFTE]' print 'num_rgrid and nproc_rgrid are unsuitable for using FFTE.' 
print", "nprow, npcol) if (n != inputmap['nstate']): print '[ScaLAPACK]' print 'nstate should be changed", "get_sl_blocking_factor(n, nprow, npcol) if (n != inputmap['nstate']): print '[ScaLAPACK]' print 'nstate should be", "{} != {}'.format(nprow*npcol,nprocs) print ' please check the nproc_ob' fail_exit() mb,nb = get_sl_blocking_factor(n,", "inputmap['yn_ffte'] = False inputmap['yn_scalapack'] = False inputmap['process_allocation'] = 'grid_sequential' with open(filename) as f:", "npcol + 2 return nprow,npcol def get_sl_blocking_factor(n, nprow, npcol): k1 = (n+nprow-1)/nprow k2", "from {} to {}'.format(inputmap['nstate'],n) inputmap['nstate'] = n print '[ScaLAPACK]' print 'process grid map", "of orbital = {}'.format(float(inputmap['nstate'])/inputmap['nproc_ob']) sx = inputmap['nproc_ob'] * inputmap['nproc_k'] / nprocs_per_node sy =", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "inputmap = {} inputmap['nproc_k'] = 1 inputmap['nproc_ob'] = 1 inputmap['nproc_rgrid'] = [1,1,1] inputmap['yn_ffte']", "= num_rgrid[i] for j in range(0,26): if t % 2 == 0: t", "if len(sys.argv) < 4: print '[Usage] ./{} <SALMON inputfile> <required # of node>", "print ' nstate = {}'.format(inputmap['nstate']) print '/' print '&rgrid' print ' num_rgrid =", "= n print '[ScaLAPACK]' print 'process grid map (row,col) = ({},{})'.format(nprow,npcol) print 'blocking", "MPI process' for i in range(0,3): print 'num_rgrid[{}] / nproc_rgrid[{}] = {}'.format(i,i,float(inputmap['num_rgrid'][i])/inputmap['nproc_rgrid'][i]) print", "if (n != inputmap['nstate']): print '[ScaLAPACK]' print 'nstate should be changed from {}", "print 'num_rgrid[{}] / nproc_rgrid[{}] = {}'.format(i,i,float(inputmap['num_rgrid'][i])/inputmap['nproc_rgrid'][i]) print '# of orbital = {}'.format(float(inputmap['nstate'])/inputmap['nproc_ob']) sx", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "dims = [] for x in float_pattern.findall(s): 
dims.append(float(x.replace('d','e'))) inputmap['dl'] = dims elif num_rgrid_pattern.match(s):", "nz] if __name__ == '__main__': if len(sys.argv) < 4: print '[Usage] ./{} <SALMON", "inputmap['nproc_k'] = nproc_k inputmap['nproc_ob'] = nproc_ob inputmap['nproc_rgrid'] = nproc_rgrid if nproc_ob >= nprocs_per_node:", "print 'wave function size per MPI process' for i in range(0,3): print 'num_rgrid[{}]", "{}'.format(i,i,float(inputmap['num_rgrid'][i])/inputmap['nproc_rgrid'][i]) print '# of orbital = {}'.format(float(inputmap['nstate'])/inputmap['nproc_ob']) sx = inputmap['nproc_ob'] * inputmap['nproc_k'] /", "nprocs_per_node, num_rgrid, is_ffte): if num_rgrid[0] % nprocs_per_node != 0: print '[INFO] num_rgrid[0] %", "ARTED developers # # Licensed under the Apache License, Version 2.0 (the \"License\");", "elif nproc_rgrid_pattern.match(s): dims = [] for x in number_pattern.findall(s): dims.append(int(x)) inputmap['nproc_rgrid'] = dims", "t = num_rgrid[i] for j in range(0,26): if t % 2 == 0:", "return mb,nb def get_nproc_rgrid(nprocs, nprocs_per_node, num_rgrid, is_ffte): if num_rgrid[0] % nprocs_per_node != 0:", "function size per MPI process' for i in range(0,3): print 'num_rgrid[{}] / nproc_rgrid[{}]", "/ 5 if t != 1: return False return True # ScaLAPACK def", "2) for ii in range(0,100): nprow = nprocs / npcol if (nprow*npcol ==", "z1 == 0 and z2 == 0 def check_ffte_condition_prime_factors(num_rgrid): for i in range(0,3):", "FFTE def check_ffte_condition_gridsize(num_rgrid, nprocs_rgrid): y1 = num_rgrid[0] % nprocs_rgrid[1] y2 = num_rgrid[1] %", "OF ANY KIND, either express or implied. # See the License for the", "print '[INFO] num_rgrid[0] % nprocs_per_node is not divided.' nzy = nprocs / nprocs_per_node", "to use FFTE.' 
if not check_ffte_condition_prime_factors(inputmap['num_rgrid']): print ' prime factors for number of", "combination of 2,3, or 5' else: print '[FFTE]' print 'num_rgrid and nproc_rgrid are", "print '[Usage] ./{} <SALMON inputfile> <required # of node> <required # of procs/node>'.format(sys.argv[0])", "= [] for x in float_pattern.findall(s): dims.append(float(x.replace('d','e'))) inputmap['dl'] = dims elif num_rgrid_pattern.match(s): dims", "= list_div(inputmap['al'], inputmap['dl']) inputmap['num_rgrid'] = [int(f) for f in dims] print '[INFO] num_rgrid", "% 2) for ii in range(0,100): ny = nzy / nz if (ny*nz", "number of grids must be combination of 2,3, or 5' else: print '[FFTE]'", "and y2 == 0 and z1 == 0 and z2 == 0 def", "0 and z1 == 0 and z2 == 0 def check_ffte_condition_prime_factors(num_rgrid): for i", "procs = {}'.format(nprocs_global) print 'calculate nproc_k,ob and rgrid' # find nproc_.* num_rgrid =", "scale system.' print 'please replace the following inputlists.' print '' print_inputlists(inputmap) print ''", "# SALMON input list def to_boolean(s): if s.lower() == '\\'y\\'': return True elif", "nstate_pattern = re.compile(r'\\s*nstate\\s*=\\s*\\d+') al_pattern = re.compile(r'\\s*al\\s*=\\s*\\d+') dl_pattern = re.compile(r'\\s*dl\\s*=\\s*\\d+') num_rgrid_pattern = re.compile(r'\\s*num_rgrid\\s*=\\s*\\d+') yn_ffte_pattern", "def check_ffte_condition_prime_factors(num_rgrid): for i in range(0,3): t = num_rgrid[i] for j in range(0,26):", "= n * i return n def fail_exit(): print 'ERROR.' sys.exit(1) # SALMON", "'# =============================================== #' print 'Probably suitable parameters for large scale system.' 
print 'please", "'calculate nproc_k,ob and rgrid' # find nproc_.* num_rgrid = inputmap['num_rgrid'] nproc_rgrid = get_nproc_rgrid(nprocs_global,", "{}'.format(nprow*npcol,nprocs) print ' please check the nproc_ob' fail_exit() mb,nb = get_sl_blocking_factor(n, nprow, npcol)", "dims = [] for x in number_pattern.findall(s): dims.append(int(x)) inputmap['num_rgrid'] = dims elif yn_ffte_pattern.match(s):", "for condition:' print ' mod(num_rgrid(1),nprocs_rgrid(2)) must be 0' print ' mod(num_rgrid(2),nprocs_rgrid(2)) must be", "nz + (nz % 2) for ii in range(0,100): ny = nzy /", "nproc_k_pattern = re.compile(r'\\s*nproc_k\\s*=\\s*\\d+') nproc_ob_pattern = re.compile(r'\\s*nproc_ob\\s*=\\s*\\d+') nproc_rgrid_pattern = re.compile(r'\\s*nproc_rgrid\\s*=\\s*\\d+') nstate_pattern = re.compile(r'\\s*nstate\\s*=\\s*\\d+') al_pattern", "0' print ' mod(num_rgrid(2),nprocs_rgrid(2)) must be 0' print ' mod(num_rgrid(2),nprocs_rgrid(3)) must be 0'", "n = max(nb*npcol, n) n = min(n, (nb+1)*npcol) mb,nb = get_sl_blocking_factor(n, nprow, npcol)", "0: t = t / 3 for j in range(0,11): if t %", "print '' print '# =============================================== #' print 'Probably suitable parameters for large scale", "yn_ffte_pattern.match(s): inputmap['yn_ffte'] = to_boolean(yn_pattern.search(s).group()) elif yn_scalapack_pattern.match(s): inputmap['yn_scalapack'] = to_boolean(yn_pattern.search(s).group()) elif proc_alloc_pattern.match(s): inputmap['process_allocation'] =", "'[INFO]' print 'product(nproc_rgrid) * nproc_ob * nproc_k /= # of MPI procs =", "print ' num_rgrid = {},{},{}'.format(*inputmap['num_rgrid']) print '/' # FFTE def check_ffte_condition_gridsize(num_rgrid, nprocs_rgrid): y1", "inputmap['nproc_k'] = 1 inputmap['nproc_ob'] = 1 inputmap['nproc_rgrid'] = [1,1,1] inputmap['yn_ffte'] = False inputmap['yn_scalapack']", "inputmap['nproc_rgrid'] = [1,1,1] inputmap['yn_ffte'] = False inputmap['yn_scalapack'] = False inputmap['process_allocation'] = 'grid_sequential' with", 
"[1,1,1] inputmap['yn_ffte'] = False inputmap['yn_scalapack'] = False inputmap['process_allocation'] = 'grid_sequential' with open(filename) as", "ii in range(0,100): nprow = nprocs / npcol if (nprow*npcol == nprocs): break", "0: print '[INFO] num_rgrid[0] % nprocs_per_node is not divided.' nzy = nprocs /", "print 'num_rgrid and nproc_rgrid are unsuitable for using FFTE.' print 'please check for", "unsuitable for using FFTE.' print 'please check for condition:' print ' mod(num_rgrid(1),nprocs_rgrid(2)) must", "z2 == 0 def check_ffte_condition_prime_factors(num_rgrid): for i in range(0,3): t = num_rgrid[i] for", "get_nproc_rgrid(nprocs, nprocs_per_node, num_rgrid, is_ffte): if num_rgrid[0] % nprocs_per_node != 0: print '[INFO] num_rgrid[0]", "= int(sys.argv[2]) nprocs_per_node = int(sys.argv[3]) nprocs_global = nnodes * nprocs_per_node if list_prod(inputmap['nproc_rgrid']) *", "__name__ == '__main__': if len(sys.argv) < 4: print '[Usage] ./{} <SALMON inputfile> <required", "inputmap['nproc_rgrid'] = dims elif al_pattern.match(s): dims = [] for x in float_pattern.findall(s): dims.append(float(x.replace('d','e')))", "' nstate = {}'.format(inputmap['nstate']) print '/' print '&rgrid' print ' num_rgrid = {},{},{}'.format(*inputmap['num_rgrid'])", "print 'blocking factor (row,col) = ({},{})'.format(mb,nb) print '[INFO]' print 'wave function size per", "not divided.' nzy = nprocs / nprocs_per_node nz = int(math.sqrt(float(nzy))) nz = nz", "if check_ffte_condition_gridsize(inputmap['num_rgrid'], inputmap['nproc_rgrid']): print '[FFTE]' print 'num_rgrid and nproc_rgrid are available to use", "print '[FFTE]' print 'num_rgrid and nproc_rgrid are available to use FFTE.' if not", "string_pattern.search(s).group().strip('\\'').lower() return inputmap def print_inputlists(inputmap): print '&parallel' print ' nproc_k = {}'.format(inputmap['nproc_k']) print", "mod(num_rgrid(3),nprocs_rgrid(3)) must be 0' fail_exit() # ScaLAPACK checking... 
if inputmap['yn_scalapack']: n = inputmap['nstate']", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "size per MPI process' for i in range(0,3): print 'num_rgrid[{}] / nproc_rgrid[{}] =", "nprocs) if (nprow*npcol != nprocs): print '[ScaLAPACK] nprow*npcol = {} != {}'.format(nprow*npcol,nprocs) print", "nzy / nz if (ny*nz == nzy): break nz = nz + 2", "in range(0,17): if t % 3 == 0: t = t / 3", "{} to {}'.format(inputmap['nstate'],n) inputmap['nstate'] = n print '[ScaLAPACK]' print 'process grid map (row,col)", "* i return n def fail_exit(): print 'ERROR.' sys.exit(1) # SALMON input list", "n * i return n def fail_exit(): print 'ERROR.' sys.exit(1) # SALMON input", "as f: number_pattern = re.compile(r'\\d+') float_pattern = re.compile(r'[+-]?(?:\\d+\\.?\\d*|\\.\\d+)(?:[dD][+-]?\\d+)?') yn_pattern = re.compile(r'\\'[yYnN]\\'') string_pattern =", "theory_pattern = re.compile(r'\\s*theory\\s*=\\s*') nproc_k_pattern = re.compile(r'\\s*nproc_k\\s*=\\s*\\d+') nproc_ob_pattern = re.compile(r'\\s*nproc_ob\\s*=\\s*\\d+') nproc_rgrid_pattern = re.compile(r'\\s*nproc_rgrid\\s*=\\s*\\d+') nstate_pattern", "print 'Theory {} does not support yet'.format(inputmap['theory']) # logical checking... 
if not 'num_rgrid'", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "sx = inputmap['nproc_ob'] * inputmap['nproc_k'] / nprocs_per_node sy = list_prod(inputmap['nproc_rgrid']) print '' print", "+ (nz % 2) for ii in range(0,100): ny = nzy / nz", "nproc_rgrid_pattern.match(s): dims = [] for x in number_pattern.findall(s): dims.append(int(x)) inputmap['nproc_rgrid'] = dims elif", "2) for ii in range(ny,1,-1): if check_ffte_condition_gridsize(num_rgrid, [nprocs_per_node, ii, nz]): if nprocs %", "<required # of procs/node>'.format(sys.argv[0]) fail_exit() inputmap = gen_inputlist_map(sys.argv[1]) if inputmap['theory'] != 'dft': print", "inputmap['theory'] = string_pattern.search(s).group().strip('\\'').lower() elif nstate_pattern.match(s): inputmap['nstate'] = int(number_pattern.search(s).group()) elif nproc_k_pattern.match(s): inputmap['nproc_k'] = int(number_pattern.search(s).group())", "nprow*npcol = {} != {}'.format(nprow*npcol,nprocs) print ' please check the nproc_ob' fail_exit() mb,nb", "find nproc_.* num_rgrid = inputmap['num_rgrid'] nproc_rgrid = get_nproc_rgrid(nprocs_global, nprocs_per_node, num_rgrid, inputmap['yn_ffte']) nproc_ob =", "range(0,100): nprow = nprocs / npcol if (nprow*npcol == nprocs): break npcol =", "list_prod([nprocs_per_node, ii, nz]) == 0: ny = ii break return [nprocs_per_node, ny, nz]", "inputmap def print_inputlists(inputmap): print '&parallel' print ' nproc_k = {}'.format(inputmap['nproc_k']) print ' nproc_ob", "to {}'.format(inputmap['nstate'],n) inputmap['nstate'] = n print '[ScaLAPACK]' print 'process grid map (row,col) =", "in float_pattern.findall(s): dims.append(float(x.replace('d','e'))) inputmap['dl'] = dims elif num_rgrid_pattern.match(s): dims = [] for x", "# ScaLAPACK def get_sl_process_grid_size(n, nprocs): npcol = int(math.sqrt(float(nprocs))) npcol = npcol + (npcol", "nproc_ob >= nprocs_per_node: inputmap['process_allocation'] = 'orbital_sequential' # FFTE checking... 
if inputmap['yn_ffte']: if check_ffte_condition_gridsize(inputmap['num_rgrid'],", "re.compile(r'\\s*nproc_k\\s*=\\s*\\d+') nproc_ob_pattern = re.compile(r'\\s*nproc_ob\\s*=\\s*\\d+') nproc_rgrid_pattern = re.compile(r'\\s*nproc_rgrid\\s*=\\s*\\d+') nstate_pattern = re.compile(r'\\s*nstate\\s*=\\s*\\d+') al_pattern = re.compile(r'\\s*al\\s*=\\s*\\d+')", "2,3, or 5' else: print '[FFTE]' print 'num_rgrid and nproc_rgrid are unsuitable for", "nprocs_per_node != 0: print '[INFO] num_rgrid[0] % nprocs_per_node is not divided.' nzy =", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "= [] for x in number_pattern.findall(s): dims.append(int(x)) inputmap['nproc_rgrid'] = dims elif al_pattern.match(s): dims", "condition:' print ' mod(num_rgrid(1),nprocs_rgrid(2)) must be 0' print ' mod(num_rgrid(2),nprocs_rgrid(2)) must be 0'", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "dims] print '[INFO] num_rgrid constructed = {}'.format(inputmap['num_rgrid']) nnodes = int(sys.argv[2]) nprocs_per_node = int(sys.argv[3])", "= False inputmap['yn_scalapack'] = False inputmap['process_allocation'] = 'grid_sequential' with open(filename) as f: number_pattern", "t / 3 for j in range(0,11): if t % 5 == 0:", "break npcol = npcol + 2 return nprow,npcol def get_sl_blocking_factor(n, nprow, npcol): k1", "print 'process grid map (row,col) = ({},{})'.format(nprow,npcol) print 'blocking factor (row,col) = ({},{})'.format(mb,nb)", "if is_ffte: ny = ny + (ny % 2) for ii in range(ny,1,-1):", "dl_pattern = re.compile(r'\\s*dl\\s*=\\s*\\d+') num_rgrid_pattern = re.compile(r'\\s*num_rgrid\\s*=\\s*\\d+') yn_ffte_pattern = re.compile(r'\\s*yn_ffte\\s*=\\s*') yn_scalapack_pattern= re.compile(r'\\s*yn_scalapack\\s*=\\s*') proc_alloc_pattern =", "k2 = (n+npcol-1)/npcol mb = min(k1,k2) nb = mb return mb,nb def get_nproc_rgrid(nprocs,", "= int(math.sqrt(float(nzy))) nz = nz + (nz % 2) for ii in range(0,100):", 
"num_rgrid_pattern.match(s): dims = [] for x in number_pattern.findall(s): dims.append(int(x)) inputmap['num_rgrid'] = dims elif", "elif yn_ffte_pattern.match(s): inputmap['yn_ffte'] = to_boolean(yn_pattern.search(s).group()) elif yn_scalapack_pattern.match(s): inputmap['yn_scalapack'] = to_boolean(yn_pattern.search(s).group()) elif proc_alloc_pattern.match(s): inputmap['process_allocation']", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "n = n * i return n def fail_exit(): print 'ERROR.' sys.exit(1) #", "re.compile(r'\\s*nstate\\s*=\\s*\\d+') al_pattern = re.compile(r'\\s*al\\s*=\\s*\\d+') dl_pattern = re.compile(r'\\s*dl\\s*=\\s*\\d+') num_rgrid_pattern = re.compile(r'\\s*num_rgrid\\s*=\\s*\\d+') yn_ffte_pattern = re.compile(r'\\s*yn_ffte\\s*=\\s*')", "ii break return [nprocs_per_node, ny, nz] if __name__ == '__main__': if len(sys.argv) <", "range(ny,1,-1): if check_ffte_condition_gridsize(num_rgrid, [nprocs_per_node, ii, nz]): if nprocs % list_prod([nprocs_per_node, ii, nz]) ==", "print '[ScaLAPACK] nprow*npcol = {} != {}'.format(nprow*npcol,nprocs) print ' please check the nproc_ob'", "nproc_k,ob and rgrid' # find nproc_.* num_rgrid = inputmap['num_rgrid'] nproc_rgrid = get_nproc_rgrid(nprocs_global, nprocs_per_node,", "must be combination of 2,3, or 5' else: print '[FFTE]' print 'num_rgrid and", "(n != inputmap['nstate']): print '[ScaLAPACK]' print 'nstate should be changed from {} to", "for large scale system.' print 'please replace the following inputlists.' print '' print_inputlists(inputmap)", "yn_ffte_pattern = re.compile(r'\\s*yn_ffte\\s*=\\s*') yn_scalapack_pattern= re.compile(r'\\s*yn_scalapack\\s*=\\s*') proc_alloc_pattern = re.compile(r'\\s*process_allocation\\s*=\\s*') for s in f: if", "checking... 
if inputmap['yn_scalapack']: n = inputmap['nstate'] nprocs = inputmap['nproc_ob'] nprow,npcol = get_sl_process_grid_size(n, nprocs)", "nprocs = inputmap['nproc_ob'] nprow,npcol = get_sl_process_grid_size(n, nprocs) if (nprow*npcol != nprocs): print '[ScaLAPACK]", "num_rgrid[0] % nprocs_rgrid[1] y2 = num_rgrid[1] % nprocs_rgrid[1] z1 = num_rgrid[1] % nprocs_rgrid[2]", "required by applicable law or agreed to in writing, software # distributed under", "or 5' else: print '[FFTE]' print 'num_rgrid and nproc_rgrid are unsuitable for using", "re.compile(r'\\s*num_rgrid\\s*=\\s*\\d+') yn_ffte_pattern = re.compile(r'\\s*yn_ffte\\s*=\\s*') yn_scalapack_pattern= re.compile(r'\\s*yn_scalapack\\s*=\\s*') proc_alloc_pattern = re.compile(r'\\s*process_allocation\\s*=\\s*') for s in f:", "mb return mb,nb def get_nproc_rgrid(nprocs, nprocs_per_node, num_rgrid, is_ffte): if num_rgrid[0] % nprocs_per_node !=", "theory_pattern.match(s): inputmap['theory'] = string_pattern.search(s).group().strip('\\'').lower() elif nstate_pattern.match(s): inputmap['nstate'] = int(number_pattern.search(s).group()) elif nproc_k_pattern.match(s): inputmap['nproc_k'] =", "{}'.format(inputmap['nproc_ob']) print ' nproc_rgrid = {},{},{}'.format(*inputmap['nproc_rgrid']) print ' process_allocation = \\'{}\\''.format(inputmap['process_allocation']) print '/'", "nprocs_rgrid): y1 = num_rgrid[0] % nprocs_rgrid[1] y2 = num_rgrid[1] % nprocs_rgrid[1] z1 =", "2020 ARTED developers # # Licensed under the Apache License, Version 2.0 (the", "applicable law or agreed to in writing, software # distributed under the License", "% 3 == 0: t = t / 3 for j in range(0,11):", "nz + 2 if is_ffte: ny = ny + (ny % 2) for", "nstate = {}'.format(inputmap['nstate']) print '/' print '&rgrid' print ' num_rgrid = {},{},{}'.format(*inputmap['num_rgrid']) print", "nprocs): break npcol = npcol + 2 return nprow,npcol def get_sl_blocking_factor(n, nprow, npcol):", "0' print ' mod(num_rgrid(3),nprocs_rgrid(3)) must be 0' fail_exit() # 
ScaLAPACK checking... if inputmap['yn_scalapack']:", "if t % 2 == 0: t = t / 2 for j", "!= nprocs): print '[ScaLAPACK] nprow*npcol = {} != {}'.format(nprow*npcol,nprocs) print ' please check", "1 inputmap['nproc_ob'] = 1 inputmap['nproc_rgrid'] = [1,1,1] inputmap['yn_ffte'] = False inputmap['yn_scalapack'] = False", "string_pattern.search(s).group().strip('\\'').lower() elif nstate_pattern.match(s): inputmap['nstate'] = int(number_pattern.search(s).group()) elif nproc_k_pattern.match(s): inputmap['nproc_k'] = int(number_pattern.search(s).group()) elif nproc_ob_pattern.match(s):", "return [nprocs_per_node, ny, nz] if __name__ == '__main__': if len(sys.argv) < 4: print", "= num_rgrid[1] % nprocs_rgrid[1] z1 = num_rgrid[1] % nprocs_rgrid[2] z2 = num_rgrid[2] %", "/ 2 for j in range(0,17): if t % 3 == 0: t", "num_rgrid[0] % nprocs_per_node != 0: print '[INFO] num_rgrid[0] % nprocs_per_node is not divided.'", "2 if is_ffte: ny = ny + (ny % 2) for ii in", "support yet'.format(inputmap['theory']) # logical checking... if not 'num_rgrid' in inputmap.keys(): dims = list_div(inputmap['al'],", "= [] for x in number_pattern.findall(s): dims.append(int(x)) inputmap['num_rgrid'] = dims elif yn_ffte_pattern.match(s): inputmap['yn_ffte']", "or agreed to in writing, software # distributed under the License is distributed", "num_rgrid[2] % nprocs_rgrid[2] return y1 == 0 and y2 == 0 and z1", "1 inputmap['nproc_k'] = nproc_k inputmap['nproc_ob'] = nproc_ob inputmap['nproc_rgrid'] = nproc_rgrid if nproc_ob >=", "inputmap['nproc_ob'] = nproc_ob inputmap['nproc_rgrid'] = nproc_rgrid if nproc_ob >= nprocs_per_node: inputmap['process_allocation'] = 'orbital_sequential'", "'dft': print 'Theory {} does not support yet'.format(inputmap['theory']) # logical checking... 
if not", "= dims elif yn_ffte_pattern.match(s): inputmap['yn_ffte'] = to_boolean(yn_pattern.search(s).group()) elif yn_scalapack_pattern.match(s): inputmap['yn_scalapack'] = to_boolean(yn_pattern.search(s).group()) elif", "nprocs / nprocs_per_node nz = int(math.sqrt(float(nzy))) nz = nz + (nz % 2)", "nproc_rgrid are available to use FFTE.' if not check_ffte_condition_prime_factors(inputmap['num_rgrid']): print ' prime factors", "[] for x in number_pattern.findall(s): dims.append(int(x)) inputmap['nproc_rgrid'] = dims elif al_pattern.match(s): dims =", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "ii in range(ny,1,-1): if check_ffte_condition_gridsize(num_rgrid, [nprocs_per_node, ii, nz]): if nprocs % list_prod([nprocs_per_node, ii,", "inputmap['yn_ffte']) nproc_ob = nprocs_global / list_prod(nproc_rgrid) nproc_k = 1 inputmap['nproc_k'] = nproc_k inputmap['nproc_ob']", "check_ffte_condition_prime_factors(num_rgrid): for i in range(0,3): t = num_rgrid[i] for j in range(0,26): if", "range(0,3): print 'num_rgrid[{}] / nproc_rgrid[{}] = {}'.format(i,i,float(inputmap['num_rgrid'][i])/inputmap['nproc_rgrid'][i]) print '# of orbital = {}'.format(float(inputmap['nstate'])/inputmap['nproc_ob'])", "= get_nproc_rgrid(nprocs_global, nprocs_per_node, num_rgrid, inputmap['yn_ffte']) nproc_ob = nprocs_global / list_prod(nproc_rgrid) nproc_k = 1", "* nproc_ob * nproc_k /= # of MPI procs = {}'.format(nprocs_global) print 'calculate", "for j in range(0,26): if t % 2 == 0: t = t", "if (ny*nz == nzy): break nz = nz + 2 if is_ffte: ny", "inputfile> <required # of node> <required # of procs/node>'.format(sys.argv[0]) fail_exit() inputmap = gen_inputlist_map(sys.argv[1])", "nproc_ob' fail_exit() mb,nb = get_sl_blocking_factor(n, nprow, npcol) if (nb*npcol != n): n =", "nlist: n = n * i return n def fail_exit(): print 'ERROR.' 
sys.exit(1)", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "network type system,' print 'probably, the following node shape is suitable to run", "== nprocs): break npcol = npcol + 2 return nprow,npcol def get_sl_blocking_factor(n, nprow,", "be changed from {} to {}'.format(inputmap['nstate'],n) inputmap['nstate'] = n print '[ScaLAPACK]' print 'process", "writing, software # distributed under the License is distributed on an \"AS IS\"", "* inputmap['nproc_ob'] * inputmap['nproc_k'] != nprocs_global: print '[INFO]' print 'product(nproc_rgrid) * nproc_ob *", "= {}'.format(float(inputmap['nstate'])/inputmap['nproc_ob']) sx = inputmap['nproc_ob'] * inputmap['nproc_k'] / nprocs_per_node sy = list_prod(inputmap['nproc_rgrid']) print", "print_inputlists(inputmap) print '' print '# =============================================== #' print 'If you use mesh-torus network", "else: return False def gen_inputlist_map(filename): inputmap = {} inputmap['nproc_k'] = 1 inputmap['nproc_ob'] =", "be combination of 2,3, or 5' else: print '[FFTE]' print 'num_rgrid and nproc_rgrid", "governing permissions and # limitations under the License. # import math import re", "n print '[ScaLAPACK]' print 'process grid map (row,col) = ({},{})'.format(nprow,npcol) print 'blocking factor", "print '# =============================================== #' print 'Probably suitable parameters for large scale system.' print", "number_pattern.findall(s): dims.append(int(x)) inputmap['nproc_rgrid'] = dims elif al_pattern.match(s): dims = [] for x in", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "1: return False return True # ScaLAPACK def get_sl_process_grid_size(n, nprocs): npcol = int(math.sqrt(float(nprocs)))", "range(0,n): p.append(l1[i] / l2[i]) return p def list_prod(nlist): n = 1 for i", "License. 
# You may obtain a copy of the License at # #", "nproc_rgrid = {},{},{}'.format(*inputmap['nproc_rgrid']) print ' process_allocation = \\'{}\\''.format(inputmap['process_allocation']) print '/' print '&system' print", "({},{})'.format(mb,nb) print '[INFO]' print 'wave function size per MPI process' for i in", "= min(k1,k2) nb = mb return mb,nb def get_nproc_rgrid(nprocs, nprocs_per_node, num_rgrid, is_ffte): if", "re.compile(r'\\s*nproc_rgrid\\s*=\\s*\\d+') nstate_pattern = re.compile(r'\\s*nstate\\s*=\\s*\\d+') al_pattern = re.compile(r'\\s*al\\s*=\\s*\\d+') dl_pattern = re.compile(r'\\s*dl\\s*=\\s*\\d+') num_rgrid_pattern = re.compile(r'\\s*num_rgrid\\s*=\\s*\\d+')", "t != 1: return False return True # ScaLAPACK def get_sl_process_grid_size(n, nprocs): npcol", "(nprow*npcol == nprocs): break npcol = npcol + 2 return nprow,npcol def get_sl_blocking_factor(n,", "if nproc_ob >= nprocs_per_node: inputmap['process_allocation'] = 'orbital_sequential' # FFTE checking... if inputmap['yn_ffte']: if", "float_pattern.findall(s): dims.append(float(x.replace('d','e'))) inputmap['al'] = dims elif dl_pattern.match(s): dims = [] for x in", "for i in range(0,3): t = num_rgrid[i] for j in range(0,26): if t", "nprocs_global = nnodes * nprocs_per_node if list_prod(inputmap['nproc_rgrid']) * inputmap['nproc_ob'] * inputmap['nproc_k'] != nprocs_global:", "= dims elif al_pattern.match(s): dims = [] for x in float_pattern.findall(s): dims.append(float(x.replace('d','e'))) inputmap['al']", "'&parallel' print ' nproc_k = {}'.format(inputmap['nproc_k']) print ' nproc_ob = {}'.format(inputmap['nproc_ob']) print '", "% nprocs_rgrid[1] z1 = num_rgrid[1] % nprocs_rgrid[2] z2 = num_rgrid[2] % nprocs_rgrid[2] return", "= re.compile(r'\\s*al\\s*=\\s*\\d+') dl_pattern = re.compile(r'\\s*dl\\s*=\\s*\\d+') num_rgrid_pattern = re.compile(r'\\s*num_rgrid\\s*=\\s*\\d+') yn_ffte_pattern = re.compile(r'\\s*yn_ffte\\s*=\\s*') yn_scalapack_pattern= re.compile(r'\\s*yn_scalapack\\s*=\\s*')", "[] for x in 
number_pattern.findall(s): dims.append(int(x)) inputmap['num_rgrid'] = dims elif yn_ffte_pattern.match(s): inputmap['yn_ffte'] =", "elif s.lower() == '\\'n\\'': return False else: return False def gen_inputlist_map(filename): inputmap =", "the application.' print '' print 'NXxNY = {}x{}'.format(sx,sy) print 'NX = (nproc_ob *", "nprocs_global / list_prod(nproc_rgrid) nproc_k = 1 inputmap['nproc_k'] = nproc_k inputmap['nproc_ob'] = nproc_ob inputmap['nproc_rgrid']", "= nnodes * nprocs_per_node if list_prod(inputmap['nproc_rgrid']) * inputmap['nproc_ob'] * inputmap['nproc_k'] != nprocs_global: print", "compliance with the License. # You may obtain a copy of the License", "inputmap['nproc_k'] = int(number_pattern.search(s).group()) elif nproc_ob_pattern.match(s): inputmap['nproc_ob'] = int(number_pattern.search(s).group()) elif nproc_rgrid_pattern.match(s): dims = []", "re.compile(r'\\s*process_allocation\\s*=\\s*') for s in f: if theory_pattern.match(s): inputmap['theory'] = string_pattern.search(s).group().strip('\\'').lower() elif nstate_pattern.match(s): inputmap['nstate']", "% nprocs_per_node != 0: print '[INFO] num_rgrid[0] % nprocs_per_node is not divided.' 
nzy", "z2 = num_rgrid[2] % nprocs_rgrid[2] return y1 == 0 and y2 == 0", "check_ffte_condition_gridsize(inputmap['num_rgrid'], inputmap['nproc_rgrid']): print '[FFTE]' print 'num_rgrid and nproc_rgrid are available to use FFTE.'", "to_boolean(yn_pattern.search(s).group()) elif proc_alloc_pattern.match(s): inputmap['process_allocation'] = string_pattern.search(s).group().strip('\\'').lower() return inputmap def print_inputlists(inputmap): print '&parallel' print", "if nprocs % list_prod([nprocs_per_node, ii, nz]) == 0: ny = ii break return", "elif nproc_ob_pattern.match(s): inputmap['nproc_ob'] = int(number_pattern.search(s).group()) elif nproc_rgrid_pattern.match(s): dims = [] for x in", "True elif s.lower() == '\\'n\\'': return False else: return False def gen_inputlist_map(filename): inputmap", "' mod(num_rgrid(2),nprocs_rgrid(2)) must be 0' print ' mod(num_rgrid(2),nprocs_rgrid(3)) must be 0' print '", "= {},{},{}'.format(*inputmap['nproc_rgrid']) print ' process_allocation = \\'{}\\''.format(inputmap['process_allocation']) print '/' print '&system' print '", "inputmap = gen_inputlist_map(sys.argv[1]) if inputmap['theory'] != 'dft': print 'Theory {} does not support", "print ' nproc_rgrid = {},{},{}'.format(*inputmap['nproc_rgrid']) print ' process_allocation = \\'{}\\''.format(inputmap['process_allocation']) print '/' print", "and nproc_rgrid are unsuitable for using FFTE.' print 'please check for condition:' print", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "check for condition:' print ' mod(num_rgrid(1),nprocs_rgrid(2)) must be 0' print ' mod(num_rgrid(2),nprocs_rgrid(2)) must", "'blocking factor (row,col) = ({},{})'.format(mb,nb) print '[INFO]' print 'wave function size per MPI", "if (nprow*npcol == nprocs): break npcol = npcol + 2 return nprow,npcol def", "nprocs): npcol = int(math.sqrt(float(nprocs))) npcol = npcol + (npcol % 2) for ii", "0' fail_exit() # ScaLAPACK checking... 
if inputmap['yn_scalapack']: n = inputmap['nstate'] nprocs = inputmap['nproc_ob']", "int(number_pattern.search(s).group()) elif nproc_rgrid_pattern.match(s): dims = [] for x in number_pattern.findall(s): dims.append(int(x)) inputmap['nproc_rgrid'] =", "def list_div(l1,l2): n = min(len(l1),len(l2)) p = [] for i in range(0,n): p.append(l1[i]", "fail_exit() mb,nb = get_sl_blocking_factor(n, nprow, npcol) if (nb*npcol != n): n = max(nb*npcol,", "nprocs_per_node sy = list_prod(inputmap['nproc_rgrid']) print '' print '# =============================================== #' print 'Probably suitable", "print '# =============================================== #' print 'If you use mesh-torus network type system,' print", "p def list_prod(nlist): n = 1 for i in nlist: n = n", "% nprocs_per_node is not divided.' nzy = nprocs / nprocs_per_node nz = int(math.sqrt(float(nzy)))", "'Theory {} does not support yet'.format(inputmap['theory']) # logical checking... if not 'num_rgrid' in", "if t % 3 == 0: t = t / 3 for j", "in inputmap.keys(): dims = list_div(inputmap['al'], inputmap['dl']) inputmap['num_rgrid'] = [int(f) for f in dims]", "'&rgrid' print ' num_rgrid = {},{},{}'.format(*inputmap['num_rgrid']) print '/' # FFTE def check_ffte_condition_gridsize(num_rgrid, nprocs_rgrid):", "sy = list_prod(inputmap['nproc_rgrid']) print '' print '# =============================================== #' print 'Probably suitable parameters", "'product(nproc_rgrid) * nproc_ob * nproc_k /= # of MPI procs = {}'.format(nprocs_global) print", "= nprocs / npcol if (nprow*npcol == nprocs): break npcol = npcol +", "0' print ' mod(num_rgrid(2),nprocs_rgrid(3)) must be 0' print ' mod(num_rgrid(3),nprocs_rgrid(3)) must be 0'", "inputmap.keys(): dims = list_div(inputmap['al'], inputmap['dl']) inputmap['num_rgrid'] = [int(f) for f in dims] print", "of procs/node>'.format(sys.argv[0]) fail_exit() inputmap = gen_inputlist_map(sys.argv[1]) if inputmap['theory'] != 'dft': print 'Theory {}", "'please replace the 
following inputlists.' print '' print_inputlists(inputmap) print '' print '# ===============================================", "mod(num_rgrid(2),nprocs_rgrid(3)) must be 0' print ' mod(num_rgrid(3),nprocs_rgrid(3)) must be 0' fail_exit() # ScaLAPACK", "not 'num_rgrid' in inputmap.keys(): dims = list_div(inputmap['al'], inputmap['dl']) inputmap['num_rgrid'] = [int(f) for f", "n) n = min(n, (nb+1)*npcol) mb,nb = get_sl_blocking_factor(n, nprow, npcol) if (n !=", "not use this file except in compliance with the License. # You may", "for ii in range(0,100): nprow = nprocs / npcol if (nprow*npcol == nprocs):", "'NX = (nproc_ob * nproc_k) / # of process per node' print 'NY", "nprocs % list_prod([nprocs_per_node, ii, nz]) == 0: ny = ii break return [nprocs_per_node,", "'please check for condition:' print ' mod(num_rgrid(1),nprocs_rgrid(2)) must be 0' print ' mod(num_rgrid(2),nprocs_rgrid(2))", "ny + (ny % 2) for ii in range(ny,1,-1): if check_ffte_condition_gridsize(num_rgrid, [nprocs_per_node, ii,", "License, Version 2.0 (the \"License\"); # you may not use this file except", "not support yet'.format(inputmap['theory']) # logical checking... if not 'num_rgrid' in inputmap.keys(): dims =", "= num_rgrid[2] % nprocs_rgrid[2] return y1 == 0 and y2 == 0 and", "range(0,26): if t % 2 == 0: t = t / 2 for", "be 0' fail_exit() # ScaLAPACK checking... if inputmap['yn_scalapack']: n = inputmap['nstate'] nprocs =", "[] for x in float_pattern.findall(s): dims.append(float(x.replace('d','e'))) inputmap['dl'] = dims elif num_rgrid_pattern.match(s): dims =", "n = inputmap['nstate'] nprocs = inputmap['nproc_ob'] nprow,npcol = get_sl_process_grid_size(n, nprocs) if (nprow*npcol !=", "string_pattern = re.compile(r'\\'\\w*\\'') theory_pattern = re.compile(r'\\s*theory\\s*=\\s*') nproc_k_pattern = re.compile(r'\\s*nproc_k\\s*=\\s*\\d+') nproc_ob_pattern = re.compile(r'\\s*nproc_ob\\s*=\\s*\\d+') nproc_rgrid_pattern", "!= 0: print '[INFO] num_rgrid[0] % nprocs_per_node is not divided.' 
nzy = nprocs", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "dims.append(int(x)) inputmap['num_rgrid'] = dims elif yn_ffte_pattern.match(s): inputmap['yn_ffte'] = to_boolean(yn_pattern.search(s).group()) elif yn_scalapack_pattern.match(s): inputmap['yn_scalapack'] =", "' num_rgrid = {},{},{}'.format(*inputmap['num_rgrid']) print '/' # FFTE def check_ffte_condition_gridsize(num_rgrid, nprocs_rgrid): y1 =", "t / 5 if t != 1: return False return True # ScaLAPACK", "* nprocs_per_node if list_prod(inputmap['nproc_rgrid']) * inputmap['nproc_ob'] * inputmap['nproc_k'] != nprocs_global: print '[INFO]' print", "inputmap['nproc_ob'] * inputmap['nproc_k'] != nprocs_global: print '[INFO]' print 'product(nproc_rgrid) * nproc_ob * nproc_k", "list_div(inputmap['al'], inputmap['dl']) inputmap['num_rgrid'] = [int(f) for f in dims] print '[INFO] num_rgrid constructed", "'\\'n\\'': return False else: return False def gen_inputlist_map(filename): inputmap = {} inputmap['nproc_k'] =", "% 2 == 0: t = t / 2 for j in range(0,17):", "= nprocs / nprocs_per_node nz = int(math.sqrt(float(nzy))) nz = nz + (nz %", "(nz % 2) for ii in range(0,100): ny = nzy / nz if", "print '[INFO] num_rgrid constructed = {}'.format(inputmap['num_rgrid']) nnodes = int(sys.argv[2]) nprocs_per_node = int(sys.argv[3]) nprocs_global", "inputmap['nproc_k'] != nprocs_global: print '[INFO]' print 'product(nproc_rgrid) * nproc_ob * nproc_k /= #", "num_rgrid = inputmap['num_rgrid'] nproc_rgrid = get_nproc_rgrid(nprocs_global, nprocs_per_node, num_rgrid, inputmap['yn_ffte']) nproc_ob = nprocs_global /", "# you may not use this file except in compliance with the License.", "suitable parameters for large scale system.' print 'please replace the following inputlists.' 
print", "== nzy): break nz = nz + 2 if is_ffte: ny = ny", "True # ScaLAPACK def get_sl_process_grid_size(n, nprocs): npcol = int(math.sqrt(float(nprocs))) npcol = npcol +", "= re.compile(r'\\s*nproc_ob\\s*=\\s*\\d+') nproc_rgrid_pattern = re.compile(r'\\s*nproc_rgrid\\s*=\\s*\\d+') nstate_pattern = re.compile(r'\\s*nstate\\s*=\\s*\\d+') al_pattern = re.compile(r'\\s*al\\s*=\\s*\\d+') dl_pattern =", "'/' print '&rgrid' print ' num_rgrid = {},{},{}'.format(*inputmap['num_rgrid']) print '/' # FFTE def", "fail_exit() # ScaLAPACK checking... if inputmap['yn_scalapack']: n = inputmap['nstate'] nprocs = inputmap['nproc_ob'] nprow,npcol", "for x in float_pattern.findall(s): dims.append(float(x.replace('d','e'))) inputmap['al'] = dims elif dl_pattern.match(s): dims = []", "nprow,npcol = get_sl_process_grid_size(n, nprocs) if (nprow*npcol != nprocs): print '[ScaLAPACK] nprow*npcol = {}", "agreed to in writing, software # distributed under the License is distributed on", "= [1,1,1] inputmap['yn_ffte'] = False inputmap['yn_scalapack'] = False inputmap['process_allocation'] = 'grid_sequential' with open(filename)", "* nproc_k /= # of MPI procs = {}'.format(nprocs_global) print 'calculate nproc_k,ob and", "range(0,11): if t % 5 == 0: t = t / 5 if", "dims elif al_pattern.match(s): dims = [] for x in float_pattern.findall(s): dims.append(float(x.replace('d','e'))) inputmap['al'] =", "# find nproc_.* num_rgrid = inputmap['num_rgrid'] nproc_rgrid = get_nproc_rgrid(nprocs_global, nprocs_per_node, num_rgrid, inputmap['yn_ffte']) nproc_ob", "in nlist: n = n * i return n def fail_exit(): print 'ERROR.'", "= inputmap['nproc_ob'] nprow,npcol = get_sl_process_grid_size(n, nprocs) if (nprow*npcol != nprocs): print '[ScaLAPACK] nprow*npcol", "nprocs_per_node if list_prod(inputmap['nproc_rgrid']) * inputmap['nproc_ob'] * inputmap['nproc_k'] != nprocs_global: print '[INFO]' print 'product(nproc_rgrid)", "(the \"License\"); # you may not use this file except in compliance with", "def 
list_prod(nlist): n = 1 for i in nlist: n = n *", "num_rgrid = {},{},{}'.format(*inputmap['num_rgrid']) print '/' # FFTE def check_ffte_condition_gridsize(num_rgrid, nprocs_rgrid): y1 = num_rgrid[0]", "= [int(f) for f in dims] print '[INFO] num_rgrid constructed = {}'.format(inputmap['num_rgrid']) nnodes", "= get_sl_blocking_factor(n, nprow, npcol) if (nb*npcol != n): n = max(nb*npcol, n) n", "permissions and # limitations under the License. # import math import re import", "print 'NXxNY = {}x{}'.format(sx,sy) print 'NX = (nproc_ob * nproc_k) / # of", "t % 2 == 0: t = t / 2 for j in", "print '&parallel' print ' nproc_k = {}'.format(inputmap['nproc_k']) print ' nproc_ob = {}'.format(inputmap['nproc_ob']) print", "= re.compile(r'\\s*process_allocation\\s*=\\s*') for s in f: if theory_pattern.match(s): inputmap['theory'] = string_pattern.search(s).group().strip('\\'').lower() elif nstate_pattern.match(s):", "!= inputmap['nstate']): print '[ScaLAPACK]' print 'nstate should be changed from {} to {}'.format(inputmap['nstate'],n)", "# Unless required by applicable law or agreed to in writing, software #", "= list_prod(inputmap['nproc_rgrid']) print '' print '# =============================================== #' print 'Probably suitable parameters for", "= nproc_k inputmap['nproc_ob'] = nproc_ob inputmap['nproc_rgrid'] = nproc_rgrid if nproc_ob >= nprocs_per_node: inputmap['process_allocation']", "/ nprocs_per_node sy = list_prod(inputmap['nproc_rgrid']) print '' print '# =============================================== #' print 'Probably", "by applicable law or agreed to in writing, software # distributed under the", "dims = list_div(inputmap['al'], inputmap['dl']) inputmap['num_rgrid'] = [int(f) for f in dims] print '[INFO]", "nprocs_per_node = int(sys.argv[3]) nprocs_global = nnodes * nprocs_per_node if list_prod(inputmap['nproc_rgrid']) * inputmap['nproc_ob'] *", "s.lower() == '\\'y\\'': return True elif s.lower() == '\\'n\\'': return False else: return", "you use 
mesh-torus network type system,' print 'probably, the following node shape is", "== 0 and y2 == 0 and z1 == 0 and z2 ==", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "if inputmap['theory'] != 'dft': print 'Theory {} does not support yet'.format(inputmap['theory']) # logical", "i return n def fail_exit(): print 'ERROR.' sys.exit(1) # SALMON input list def", "nproc_ob inputmap['nproc_rgrid'] = nproc_rgrid if nproc_ob >= nprocs_per_node: inputmap['process_allocation'] = 'orbital_sequential' # FFTE", "elif nstate_pattern.match(s): inputmap['nstate'] = int(number_pattern.search(s).group()) elif nproc_k_pattern.match(s): inputmap['nproc_k'] = int(number_pattern.search(s).group()) elif nproc_ob_pattern.match(s): inputmap['nproc_ob']", "nprocs_rgrid[2] return y1 == 0 and y2 == 0 and z1 == 0", "if list_prod(inputmap['nproc_rgrid']) * inputmap['nproc_ob'] * inputmap['nproc_k'] != nprocs_global: print '[INFO]' print 'product(nproc_rgrid) *", "in float_pattern.findall(s): dims.append(float(x.replace('d','e'))) inputmap['al'] = dims elif dl_pattern.match(s): dims = [] for x", "npcol): k1 = (n+nprow-1)/nprow k2 = (n+npcol-1)/npcol mb = min(k1,k2) nb = mb", "print ' nproc_k = {}'.format(inputmap['nproc_k']) print ' nproc_ob = {}'.format(inputmap['nproc_ob']) print ' nproc_rgrid", "'' print '# =============================================== #' print 'If you use mesh-torus network type system,'", "(nprow*npcol != nprocs): print '[ScaLAPACK] nprow*npcol = {} != {}'.format(nprow*npcol,nprocs) print ' please", "import re import sys def list_div(l1,l2): n = min(len(l1),len(l2)) p = [] for", "# limitations under the License. # import math import re import sys def", "/ 3 for j in range(0,11): if t % 5 == 0: t", "system.' print 'please replace the following inputlists.' 
print '' print_inputlists(inputmap) print '' print", "type system,' print 'probably, the following node shape is suitable to run the", "False inputmap['yn_scalapack'] = False inputmap['process_allocation'] = 'grid_sequential' with open(filename) as f: number_pattern =", "inputmap['dl']) inputmap['num_rgrid'] = [int(f) for f in dims] print '[INFO] num_rgrid constructed =", "nnodes * nprocs_per_node if list_prod(inputmap['nproc_rgrid']) * inputmap['nproc_ob'] * inputmap['nproc_k'] != nprocs_global: print '[INFO]'", "nnodes = int(sys.argv[2]) nprocs_per_node = int(sys.argv[3]) nprocs_global = nnodes * nprocs_per_node if list_prod(inputmap['nproc_rgrid'])", "'num_rgrid and nproc_rgrid are unsuitable for using FFTE.' print 'please check for condition:'", "# of procs/node>'.format(sys.argv[0]) fail_exit() inputmap = gen_inputlist_map(sys.argv[1]) if inputmap['theory'] != 'dft': print 'Theory", "yn_scalapack_pattern= re.compile(r'\\s*yn_scalapack\\s*=\\s*') proc_alloc_pattern = re.compile(r'\\s*process_allocation\\s*=\\s*') for s in f: if theory_pattern.match(s): inputmap['theory'] =", "return y1 == 0 and y2 == 0 and z1 == 0 and", "int(number_pattern.search(s).group()) elif nproc_ob_pattern.match(s): inputmap['nproc_ob'] = int(number_pattern.search(s).group()) elif nproc_rgrid_pattern.match(s): dims = [] for x", "file except in compliance with the License. # You may obtain a copy", "is not divided.' nzy = nprocs / nprocs_per_node nz = int(math.sqrt(float(nzy))) nz =", "= ({},{})'.format(nprow,npcol) print 'blocking factor (row,col) = ({},{})'.format(mb,nb) print '[INFO]' print 'wave function", "list_div(l1,l2): n = min(len(l1),len(l2)) p = [] for i in range(0,n): p.append(l1[i] /", "'' print 'NXxNY = {}x{}'.format(sx,sy) print 'NX = (nproc_ob * nproc_k) / #", "=============================================== #' print 'Probably suitable parameters for large scale system.' print 'please replace", "return n def fail_exit(): print 'ERROR.' 
sys.exit(1) # SALMON input list def to_boolean(s):", "= {}x{}'.format(sx,sy) print 'NX = (nproc_ob * nproc_k) / # of process per", "'__main__': if len(sys.argv) < 4: print '[Usage] ./{} <SALMON inputfile> <required # of", "and rgrid' # find nproc_.* num_rgrid = inputmap['num_rgrid'] nproc_rgrid = get_nproc_rgrid(nprocs_global, nprocs_per_node, num_rgrid,", "i in range(0,n): p.append(l1[i] / l2[i]) return p def list_prod(nlist): n = 1", "node> <required # of procs/node>'.format(sys.argv[0]) fail_exit() inputmap = gen_inputlist_map(sys.argv[1]) if inputmap['theory'] != 'dft':", "inputmap['yn_scalapack'] = to_boolean(yn_pattern.search(s).group()) elif proc_alloc_pattern.match(s): inputmap['process_allocation'] = string_pattern.search(s).group().strip('\\'').lower() return inputmap def print_inputlists(inputmap): print", "print 'calculate nproc_k,ob and rgrid' # find nproc_.* num_rgrid = inputmap['num_rgrid'] nproc_rgrid =", "[] for i in range(0,n): p.append(l1[i] / l2[i]) return p def list_prod(nlist): n", "License for the specific language governing permissions and # limitations under the License.", "available to use FFTE.' 
if not check_ffte_condition_prime_factors(inputmap['num_rgrid']): print ' prime factors for number", "= nproc_ob inputmap['nproc_rgrid'] = nproc_rgrid if nproc_ob >= nprocs_per_node: inputmap['process_allocation'] = 'orbital_sequential' #", "inputmap['nstate'] nprocs = inputmap['nproc_ob'] nprow,npcol = get_sl_process_grid_size(n, nprocs) if (nprow*npcol != nprocs): print", "list_prod(inputmap['nproc_rgrid']) print '' print '# =============================================== #' print 'Probably suitable parameters for large", "= nzy / nz if (ny*nz == nzy): break nz = nz +", "<required # of node> <required # of procs/node>'.format(sys.argv[0]) fail_exit() inputmap = gen_inputlist_map(sys.argv[1]) if", "= get_sl_blocking_factor(n, nprow, npcol) if (n != inputmap['nstate']): print '[ScaLAPACK]' print 'nstate should", "ny = ii break return [nprocs_per_node, ny, nz] if __name__ == '__main__': if", "to in writing, software # distributed under the License is distributed on an", "'grid_sequential' with open(filename) as f: number_pattern = re.compile(r'\\d+') float_pattern = re.compile(r'[+-]?(?:\\d+\\.?\\d*|\\.\\d+)(?:[dD][+-]?\\d+)?') yn_pattern =", "y1 = num_rgrid[0] % nprocs_rgrid[1] y2 = num_rgrid[1] % nprocs_rgrid[1] z1 = num_rgrid[1]", "in range(0,11): if t % 5 == 0: t = t / 5", "= re.compile(r'\\d+') float_pattern = re.compile(r'[+-]?(?:\\d+\\.?\\d*|\\.\\d+)(?:[dD][+-]?\\d+)?') yn_pattern = re.compile(r'\\'[yYnN]\\'') string_pattern = re.compile(r'\\'\\w*\\'') theory_pattern =", "def check_ffte_condition_gridsize(num_rgrid, nprocs_rgrid): y1 = num_rgrid[0] % nprocs_rgrid[1] y2 = num_rgrid[1] % nprocs_rgrid[1]", "'probably, the following node shape is suitable to run the application.' print ''", "implied. 
# See the License for the specific language governing permissions and #", "nproc_ob_pattern = re.compile(r'\\s*nproc_ob\\s*=\\s*\\d+') nproc_rgrid_pattern = re.compile(r'\\s*nproc_rgrid\\s*=\\s*\\d+') nstate_pattern = re.compile(r'\\s*nstate\\s*=\\s*\\d+') al_pattern = re.compile(r'\\s*al\\s*=\\s*\\d+') dl_pattern", "for ii in range(0,100): ny = nzy / nz if (ny*nz == nzy):", "\"License\"); # you may not use this file except in compliance with the", "min(len(l1),len(l2)) p = [] for i in range(0,n): p.append(l1[i] / l2[i]) return p", "= 1 inputmap['nproc_rgrid'] = [1,1,1] inputmap['yn_ffte'] = False inputmap['yn_scalapack'] = False inputmap['process_allocation'] =", "{}'.format(inputmap['nstate']) print '/' print '&rgrid' print ' num_rgrid = {},{},{}'.format(*inputmap['num_rgrid']) print '/' #", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "in range(0,3): t = num_rgrid[i] for j in range(0,26): if t % 2", "False return True # ScaLAPACK def get_sl_process_grid_size(n, nprocs): npcol = int(math.sqrt(float(nprocs))) npcol =", "(nb*npcol != n): n = max(nb*npcol, n) n = min(n, (nb+1)*npcol) mb,nb =", "% 2) for ii in range(ny,1,-1): if check_ffte_condition_gridsize(num_rgrid, [nprocs_per_node, ii, nz]): if nprocs", "'\\'y\\'': return True elif s.lower() == '\\'n\\'': return False else: return False def", "re.compile(r'\\s*yn_scalapack\\s*=\\s*') proc_alloc_pattern = re.compile(r'\\s*process_allocation\\s*=\\s*') for s in f: if theory_pattern.match(s): inputmap['theory'] = string_pattern.search(s).group().strip('\\'').lower()", "dims elif yn_ffte_pattern.match(s): inputmap['yn_ffte'] = to_boolean(yn_pattern.search(s).group()) elif yn_scalapack_pattern.match(s): inputmap['yn_scalapack'] = to_boolean(yn_pattern.search(s).group()) elif proc_alloc_pattern.match(s):", "def get_sl_blocking_factor(n, nprow, npcol): k1 = (n+nprow-1)/nprow k2 = (n+npcol-1)/npcol mb = min(k1,k2)", "'# =============================================== #' 
print 'If you use mesh-torus network type system,' print 'probably,", "dims.append(float(x.replace('d','e'))) inputmap['dl'] = dims elif num_rgrid_pattern.match(s): dims = [] for x in number_pattern.findall(s):", "= to_boolean(yn_pattern.search(s).group()) elif yn_scalapack_pattern.match(s): inputmap['yn_scalapack'] = to_boolean(yn_pattern.search(s).group()) elif proc_alloc_pattern.match(s): inputmap['process_allocation'] = string_pattern.search(s).group().strip('\\'').lower() return", "npcol if (nprow*npcol == nprocs): break npcol = npcol + 2 return nprow,npcol", "range(0,3): t = num_rgrid[i] for j in range(0,26): if t % 2 ==", "break return [nprocs_per_node, ny, nz] if __name__ == '__main__': if len(sys.argv) < 4:", "checking... if inputmap['yn_ffte']: if check_ffte_condition_gridsize(inputmap['num_rgrid'], inputmap['nproc_rgrid']): print '[FFTE]' print 'num_rgrid and nproc_rgrid are", "if inputmap['yn_ffte']: if check_ffte_condition_gridsize(inputmap['num_rgrid'], inputmap['nproc_rgrid']): print '[FFTE]' print 'num_rgrid and nproc_rgrid are available", "' mod(num_rgrid(3),nprocs_rgrid(3)) must be 0' fail_exit() # ScaLAPACK checking... if inputmap['yn_scalapack']: n =", "n = 1 for i in nlist: n = n * i return", "or implied. 
# See the License for the specific language governing permissions and", "inputmap['nstate']): print '[ScaLAPACK]' print 'nstate should be changed from {} to {}'.format(inputmap['nstate'],n) inputmap['nstate']", "return inputmap def print_inputlists(inputmap): print '&parallel' print ' nproc_k = {}'.format(inputmap['nproc_k']) print '", "= inputmap['nstate'] nprocs = inputmap['nproc_ob'] nprow,npcol = get_sl_process_grid_size(n, nprocs) if (nprow*npcol != nprocs):", "= t / 2 for j in range(0,17): if t % 3 ==", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "/ npcol if (nprow*npcol == nprocs): break npcol = npcol + 2 return", "nz = nz + 2 if is_ffte: ny = ny + (ny %", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "\\'{}\\''.format(inputmap['process_allocation']) print '/' print '&system' print ' nstate = {}'.format(inputmap['nstate']) print '/' print", "nproc_k /= # of MPI procs = {}'.format(nprocs_global) print 'calculate nproc_k,ob and rgrid'", "(row,col) = ({},{})'.format(mb,nb) print '[INFO]' print 'wave function size per MPI process' for", "re.compile(r'\\s*nproc_ob\\s*=\\s*\\d+') nproc_rgrid_pattern = re.compile(r'\\s*nproc_rgrid\\s*=\\s*\\d+') nstate_pattern = re.compile(r'\\s*nstate\\s*=\\s*\\d+') al_pattern = re.compile(r'\\s*al\\s*=\\s*\\d+') dl_pattern = re.compile(r'\\s*dl\\s*=\\s*\\d+')", "print 'num_rgrid and nproc_rgrid are available to use FFTE.' 
if not check_ffte_condition_prime_factors(inputmap['num_rgrid']): print", "return False else: return False def gen_inputlist_map(filename): inputmap = {} inputmap['nproc_k'] = 1", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "if (nprow*npcol != nprocs): print '[ScaLAPACK] nprow*npcol = {} != {}'.format(nprow*npcol,nprocs) print '", "in writing, software # distributed under the License is distributed on an \"AS", "z1 = num_rgrid[1] % nprocs_rgrid[2] z2 = num_rgrid[2] % nprocs_rgrid[2] return y1 ==", "nprocs_per_node: inputmap['process_allocation'] = 'orbital_sequential' # FFTE checking... if inputmap['yn_ffte']: if check_ffte_condition_gridsize(inputmap['num_rgrid'], inputmap['nproc_rgrid']): print", "' prime factors for number of grids must be combination of 2,3, or", "npcol) if (nb*npcol != n): n = max(nb*npcol, n) n = min(n, (nb+1)*npcol)", "== '\\'n\\'': return False else: return False def gen_inputlist_map(filename): inputmap = {} inputmap['nproc_k']", "print ' please check the nproc_ob' fail_exit() mb,nb = get_sl_blocking_factor(n, nprow, npcol) if", "if t != 1: return False return True # ScaLAPACK def get_sl_process_grid_size(n, nprocs):", "sys.exit(1) # SALMON input list def to_boolean(s): if s.lower() == '\\'y\\'': return True", "nproc_k_pattern.match(s): inputmap['nproc_k'] = int(number_pattern.search(s).group()) elif nproc_ob_pattern.match(s): inputmap['nproc_ob'] = int(number_pattern.search(s).group()) elif nproc_rgrid_pattern.match(s): dims =", "process_allocation = \\'{}\\''.format(inputmap['process_allocation']) print '/' print '&system' print ' nstate = {}'.format(inputmap['nstate']) print", "= npcol + 2 return nprow,npcol def get_sl_blocking_factor(n, nprow, npcol): k1 = (n+nprow-1)/nprow", "nprow, npcol) if (nb*npcol != n): n = max(nb*npcol, n) n = min(n,", "== 0 and z1 == 0 and z2 == 0 def check_ffte_condition_prime_factors(num_rgrid): for", "t % 5 == 0: t = t / 5 if t !=", "# See the 
License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "re.compile(r'[+-]?(?:\\d+\\.?\\d*|\\.\\d+)(?:[dD][+-]?\\d+)?') yn_pattern = re.compile(r'\\'[yYnN]\\'') string_pattern = re.compile(r'\\'\\w*\\'') theory_pattern = re.compile(r'\\s*theory\\s*=\\s*') nproc_k_pattern = re.compile(r'\\s*nproc_k\\s*=\\s*\\d+')", "gen_inputlist_map(filename): inputmap = {} inputmap['nproc_k'] = 1 inputmap['nproc_ob'] = 1 inputmap['nproc_rgrid'] = [1,1,1]", "return False return True # ScaLAPACK def get_sl_process_grid_size(n, nprocs): npcol = int(math.sqrt(float(nprocs))) npcol", "num_rgrid_pattern = re.compile(r'\\s*num_rgrid\\s*=\\s*\\d+') yn_ffte_pattern = re.compile(r'\\s*yn_ffte\\s*=\\s*') yn_scalapack_pattern= re.compile(r'\\s*yn_scalapack\\s*=\\s*') proc_alloc_pattern = re.compile(r'\\s*process_allocation\\s*=\\s*') for s", "nprocs_rgrid[1] z1 = num_rgrid[1] % nprocs_rgrid[2] z2 = num_rgrid[2] % nprocs_rgrid[2] return y1", "re import sys def list_div(l1,l2): n = min(len(l1),len(l2)) p = [] for i", "if theory_pattern.match(s): inputmap['theory'] = string_pattern.search(s).group().strip('\\'').lower() elif nstate_pattern.match(s): inputmap['nstate'] = int(number_pattern.search(s).group()) elif nproc_k_pattern.match(s): inputmap['nproc_k']", "5 == 0: t = t / 5 if t != 1: return", "= gen_inputlist_map(sys.argv[1]) if inputmap['theory'] != 'dft': print 'Theory {} does not support yet'.format(inputmap['theory'])", "specific language governing permissions and # limitations under the License. 
# import math", "SALMON input list def to_boolean(s): if s.lower() == '\\'y\\'': return True elif s.lower()", "re.compile(r'\\d+') float_pattern = re.compile(r'[+-]?(?:\\d+\\.?\\d*|\\.\\d+)(?:[dD][+-]?\\d+)?') yn_pattern = re.compile(r'\\'[yYnN]\\'') string_pattern = re.compile(r'\\'\\w*\\'') theory_pattern = re.compile(r'\\s*theory\\s*=\\s*')", "= string_pattern.search(s).group().strip('\\'').lower() return inputmap def print_inputlists(inputmap): print '&parallel' print ' nproc_k = {}'.format(inputmap['nproc_k'])", "4: print '[Usage] ./{} <SALMON inputfile> <required # of node> <required # of", "# FFTE checking... if inputmap['yn_ffte']: if check_ffte_condition_gridsize(inputmap['num_rgrid'], inputmap['nproc_rgrid']): print '[FFTE]' print 'num_rgrid and", "list def to_boolean(s): if s.lower() == '\\'y\\'': return True elif s.lower() == '\\'n\\'':", "print '[INFO]' print 'wave function size per MPI process' for i in range(0,3):", "f: number_pattern = re.compile(r'\\d+') float_pattern = re.compile(r'[+-]?(?:\\d+\\.?\\d*|\\.\\d+)(?:[dD][+-]?\\d+)?') yn_pattern = re.compile(r'\\'[yYnN]\\'') string_pattern = re.compile(r'\\'\\w*\\'')", "= {}'.format(nprocs_global) print 'calculate nproc_k,ob and rgrid' # find nproc_.* num_rgrid = inputmap['num_rgrid']", "re.compile(r'\\'\\w*\\'') theory_pattern = re.compile(r'\\s*theory\\s*=\\s*') nproc_k_pattern = re.compile(r'\\s*nproc_k\\s*=\\s*\\d+') nproc_ob_pattern = re.compile(r'\\s*nproc_ob\\s*=\\s*\\d+') nproc_rgrid_pattern = re.compile(r'\\s*nproc_rgrid\\s*=\\s*\\d+')", "'[Usage] ./{} <SALMON inputfile> <required # of node> <required # of procs/node>'.format(sys.argv[0]) fail_exit()", "node shape is suitable to run the application.' 
print '' print 'NXxNY =", "{}'.format(inputmap['nstate'],n) inputmap['nstate'] = n print '[ScaLAPACK]' print 'process grid map (row,col) = ({},{})'.format(nprow,npcol)", "npcol = npcol + (npcol % 2) for ii in range(0,100): nprow =", "2 return nprow,npcol def get_sl_blocking_factor(n, nprow, npcol): k1 = (n+nprow-1)/nprow k2 = (n+npcol-1)/npcol", "= int(number_pattern.search(s).group()) elif nproc_k_pattern.match(s): inputmap['nproc_k'] = int(number_pattern.search(s).group()) elif nproc_ob_pattern.match(s): inputmap['nproc_ob'] = int(number_pattern.search(s).group()) elif", "= re.compile(r'\\s*dl\\s*=\\s*\\d+') num_rgrid_pattern = re.compile(r'\\s*num_rgrid\\s*=\\s*\\d+') yn_ffte_pattern = re.compile(r'\\s*yn_ffte\\s*=\\s*') yn_scalapack_pattern= re.compile(r'\\s*yn_scalapack\\s*=\\s*') proc_alloc_pattern = re.compile(r'\\s*process_allocation\\s*=\\s*')", "'[FFTE]' print 'num_rgrid and nproc_rgrid are unsuitable for using FFTE.' print 'please check", "nzy): break nz = nz + 2 if is_ffte: ny = ny +", "/ nprocs_per_node nz = int(math.sqrt(float(nzy))) nz = nz + (nz % 2) for", "of node> <required # of procs/node>'.format(sys.argv[0]) fail_exit() inputmap = gen_inputlist_map(sys.argv[1]) if inputmap['theory'] !=", "# import math import re import sys def list_div(l1,l2): n = min(len(l1),len(l2)) p", "= ii break return [nprocs_per_node, ny, nz] if __name__ == '__main__': if len(sys.argv)", "= nz + 2 if is_ffte: ny = ny + (ny % 2)", "= re.compile(r'\\s*nstate\\s*=\\s*\\d+') al_pattern = re.compile(r'\\s*al\\s*=\\s*\\d+') dl_pattern = re.compile(r'\\s*dl\\s*=\\s*\\d+') num_rgrid_pattern = re.compile(r'\\s*num_rgrid\\s*=\\s*\\d+') yn_ffte_pattern =", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "dims.append(int(x)) inputmap['nproc_rgrid'] = dims elif al_pattern.match(s): dims = [] for x in float_pattern.findall(s):", "you may not use this file except in compliance with the License. 
#", "ny = ny + (ny % 2) for ii in range(ny,1,-1): if check_ffte_condition_gridsize(num_rgrid,", "in f: if theory_pattern.match(s): inputmap['theory'] = string_pattern.search(s).group().strip('\\'').lower() elif nstate_pattern.match(s): inputmap['nstate'] = int(number_pattern.search(s).group()) elif", "nz]) == 0: ny = ii break return [nprocs_per_node, ny, nz] if __name__", "% nprocs_rgrid[2] z2 = num_rgrid[2] % nprocs_rgrid[2] return y1 == 0 and y2", "'[FFTE]' print 'num_rgrid and nproc_rgrid are available to use FFTE.' if not check_ffte_condition_prime_factors(inputmap['num_rgrid']):", "import math import re import sys def list_div(l1,l2): n = min(len(l1),len(l2)) p =", "# # Copyright 2020 ARTED developers # # Licensed under the Apache License,", "inputmap['yn_scalapack']: n = inputmap['nstate'] nprocs = inputmap['nproc_ob'] nprow,npcol = get_sl_process_grid_size(n, nprocs) if (nprow*npcol", "following node shape is suitable to run the application.' print '' print 'NXxNY", "/ nz if (ny*nz == nzy): break nz = nz + 2 if", "and # limitations under the License. # import math import re import sys", "'Probably suitable parameters for large scale system.' print 'please replace the following inputlists.'", "logical checking... if not 'num_rgrid' in inputmap.keys(): dims = list_div(inputmap['al'], inputmap['dl']) inputmap['num_rgrid'] =", "factor (row,col) = ({},{})'.format(mb,nb) print '[INFO]' print 'wave function size per MPI process'", "if s.lower() == '\\'y\\'': return True elif s.lower() == '\\'n\\'': return False else:", "= npcol + (npcol % 2) for ii in range(0,100): nprow = nprocs", "should be changed from {} to {}'.format(inputmap['nstate'],n) inputmap['nstate'] = n print '[ScaLAPACK]' print", "mesh-torus network type system,' print 'probably, the following node shape is suitable to", "t = t / 2 for j in range(0,17): if t % 3", "y2 == 0 and z1 == 0 and z2 == 0 def check_ffte_condition_prime_factors(num_rgrid):", "use this file except in compliance with the License. 
# You may obtain", "< 4: print '[Usage] ./{} <SALMON inputfile> <required # of node> <required #", "= {}'.format(inputmap['num_rgrid']) nnodes = int(sys.argv[2]) nprocs_per_node = int(sys.argv[3]) nprocs_global = nnodes * nprocs_per_node", "(ny*nz == nzy): break nz = nz + 2 if is_ffte: ny =", "nproc_ob_pattern.match(s): inputmap['nproc_ob'] = int(number_pattern.search(s).group()) elif nproc_rgrid_pattern.match(s): dims = [] for x in number_pattern.findall(s):", "check the nproc_ob' fail_exit() mb,nb = get_sl_blocking_factor(n, nprow, npcol) if (nb*npcol != n):", "num_rgrid[1] % nprocs_rgrid[1] z1 = num_rgrid[1] % nprocs_rgrid[2] z2 = num_rgrid[2] % nprocs_rgrid[2]", "nproc_ob = {}'.format(inputmap['nproc_ob']) print ' nproc_rgrid = {},{},{}'.format(*inputmap['nproc_rgrid']) print ' process_allocation = \\'{}\\''.format(inputmap['process_allocation'])", ">= nprocs_per_node: inputmap['process_allocation'] = 'orbital_sequential' # FFTE checking... if inputmap['yn_ffte']: if check_ffte_condition_gridsize(inputmap['num_rgrid'], inputmap['nproc_rgrid']):", "the nproc_ob' fail_exit() mb,nb = get_sl_blocking_factor(n, nprow, npcol) if (nb*npcol != n): n", "#' print 'Probably suitable parameters for large scale system.' 
print 'please replace the", "is_ffte): if num_rgrid[0] % nprocs_per_node != 0: print '[INFO] num_rgrid[0] % nprocs_per_node is", "'[INFO]' print 'wave function size per MPI process' for i in range(0,3): print", "print ' nproc_ob = {}'.format(inputmap['nproc_ob']) print ' nproc_rgrid = {},{},{}'.format(*inputmap['nproc_rgrid']) print ' process_allocation", "print '&rgrid' print ' num_rgrid = {},{},{}'.format(*inputmap['num_rgrid']) print '/' # FFTE def check_ffte_condition_gridsize(num_rgrid,", "* inputmap['nproc_k'] != nprocs_global: print '[INFO]' print 'product(nproc_rgrid) * nproc_ob * nproc_k /=", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "list_prod(nproc_rgrid) nproc_k = 1 inputmap['nproc_k'] = nproc_k inputmap['nproc_ob'] = nproc_ob inputmap['nproc_rgrid'] = nproc_rgrid", "math import re import sys def list_div(l1,l2): n = min(len(l1),len(l2)) p = []", "t / 2 for j in range(0,17): if t % 3 == 0:", "(nb+1)*npcol) mb,nb = get_sl_blocking_factor(n, nprow, npcol) if (n != inputmap['nstate']): print '[ScaLAPACK]' print", "yn_scalapack_pattern.match(s): inputmap['yn_scalapack'] = to_boolean(yn_pattern.search(s).group()) elif proc_alloc_pattern.match(s): inputmap['process_allocation'] = string_pattern.search(s).group().strip('\\'').lower() return inputmap def print_inputlists(inputmap):", "nz if (ny*nz == nzy): break nz = nz + 2 if is_ffte:", "= t / 3 for j in range(0,11): if t % 5 ==", "mb,nb = get_sl_blocking_factor(n, nprow, npcol) if (n != inputmap['nstate']): print '[ScaLAPACK]' print 'nstate", "al_pattern = re.compile(r'\\s*al\\s*=\\s*\\d+') dl_pattern = re.compile(r'\\s*dl\\s*=\\s*\\d+') num_rgrid_pattern = re.compile(r'\\s*num_rgrid\\s*=\\s*\\d+') yn_ffte_pattern = re.compile(r'\\s*yn_ffte\\s*=\\s*') yn_scalapack_pattern=", "to run the application.' 
print '' print 'NXxNY = {}x{}'.format(sx,sy) print 'NX =", "inputmap['nproc_ob'] = 1 inputmap['nproc_rgrid'] = [1,1,1] inputmap['yn_ffte'] = False inputmap['yn_scalapack'] = False inputmap['process_allocation']", "constructed = {}'.format(inputmap['num_rgrid']) nnodes = int(sys.argv[2]) nprocs_per_node = int(sys.argv[3]) nprocs_global = nnodes *", "'nstate should be changed from {} to {}'.format(inputmap['nstate'],n) inputmap['nstate'] = n print '[ScaLAPACK]'", "= num_rgrid[0] % nprocs_rgrid[1] y2 = num_rgrid[1] % nprocs_rgrid[1] z1 = num_rgrid[1] %", "'[INFO] num_rgrid constructed = {}'.format(inputmap['num_rgrid']) nnodes = int(sys.argv[2]) nprocs_per_node = int(sys.argv[3]) nprocs_global =", "2.0 (the \"License\"); # you may not use this file except in compliance", "of grids must be combination of 2,3, or 5' else: print '[FFTE]' print", "dims elif dl_pattern.match(s): dims = [] for x in float_pattern.findall(s): dims.append(float(x.replace('d','e'))) inputmap['dl'] =", "if check_ffte_condition_gridsize(num_rgrid, [nprocs_per_node, ii, nz]): if nprocs % list_prod([nprocs_per_node, ii, nz]) == 0:", "s.lower() == '\\'n\\'': return False else: return False def gen_inputlist_map(filename): inputmap = {}", "./{} <SALMON inputfile> <required # of node> <required # of procs/node>'.format(sys.argv[0]) fail_exit() inputmap", "'process grid map (row,col) = ({},{})'.format(nprow,npcol) print 'blocking factor (row,col) = ({},{})'.format(mb,nb) print", "!= 'dft': print 'Theory {} does not support yet'.format(inputmap['theory']) # logical checking... if", "get_sl_blocking_factor(n, nprow, npcol): k1 = (n+nprow-1)/nprow k2 = (n+npcol-1)/npcol mb = min(k1,k2) nb", "for the specific language governing permissions and # limitations under the License. #", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the", "3 == 0: t = t / 3 for j in range(0,11): if", "import sys def list_div(l1,l2): n = min(len(l1),len(l2)) p = [] for i in", "use mesh-torus network type system,' print 'probably, the following node shape is suitable", "= (n+npcol-1)/npcol mb = min(k1,k2) nb = mb return mb,nb def get_nproc_rgrid(nprocs, nprocs_per_node,", "return False def gen_inputlist_map(filename): inputmap = {} inputmap['nproc_k'] = 1 inputmap['nproc_ob'] = 1", "is_ffte: ny = ny + (ny % 2) for ii in range(ny,1,-1): if", "proc_alloc_pattern = re.compile(r'\\s*process_allocation\\s*=\\s*') for s in f: if theory_pattern.match(s): inputmap['theory'] = string_pattern.search(s).group().strip('\\'').lower() elif", "= re.compile(r'\\s*yn_ffte\\s*=\\s*') yn_scalapack_pattern= re.compile(r'\\s*yn_scalapack\\s*=\\s*') proc_alloc_pattern = re.compile(r'\\s*process_allocation\\s*=\\s*') for s in f: if theory_pattern.match(s):", "# # Unless required by applicable law or agreed to in writing, software", "f: if theory_pattern.match(s): inputmap['theory'] = string_pattern.search(s).group().strip('\\'').lower() elif nstate_pattern.match(s): inputmap['nstate'] = int(number_pattern.search(s).group()) elif nproc_k_pattern.match(s):", "re.compile(r'\\'[yYnN]\\'') string_pattern = re.compile(r'\\'\\w*\\'') theory_pattern = re.compile(r'\\s*theory\\s*=\\s*') nproc_k_pattern = re.compile(r'\\s*nproc_k\\s*=\\s*\\d+') nproc_ob_pattern = re.compile(r'\\s*nproc_ob\\s*=\\s*\\d+')", "for ii in range(ny,1,-1): if check_ffte_condition_gridsize(num_rgrid, [nprocs_per_node, ii, nz]): if nprocs % list_prod([nprocs_per_node,", "= inputmap['num_rgrid'] nproc_rgrid = get_nproc_rgrid(nprocs_global, nprocs_per_node, num_rgrid, inputmap['yn_ffte']) nproc_ob = nprocs_global / list_prod(nproc_rgrid)", "FFTE checking... 
if inputmap['yn_ffte']: if check_ffte_condition_gridsize(inputmap['num_rgrid'], inputmap['nproc_rgrid']): print '[FFTE]' print 'num_rgrid and nproc_rgrid", "inputmap['yn_ffte'] = to_boolean(yn_pattern.search(s).group()) elif yn_scalapack_pattern.match(s): inputmap['yn_scalapack'] = to_boolean(yn_pattern.search(s).group()) elif proc_alloc_pattern.match(s): inputmap['process_allocation'] = string_pattern.search(s).group().strip('\\'').lower()", "express or implied. # See the License for the specific language governing permissions", "print 'Probably suitable parameters for large scale system.' print 'please replace the following", "nproc_k = {}'.format(inputmap['nproc_k']) print ' nproc_ob = {}'.format(inputmap['nproc_ob']) print ' nproc_rgrid = {},{},{}'.format(*inputmap['nproc_rgrid'])", "=============================================== #' print 'If you use mesh-torus network type system,' print 'probably, the", "== '__main__': if len(sys.argv) < 4: print '[Usage] ./{} <SALMON inputfile> <required #", "= int(sys.argv[3]) nprocs_global = nnodes * nprocs_per_node if list_prod(inputmap['nproc_rgrid']) * inputmap['nproc_ob'] * inputmap['nproc_k']", "i in range(0,3): print 'num_rgrid[{}] / nproc_rgrid[{}] = {}'.format(i,i,float(inputmap['num_rgrid'][i])/inputmap['nproc_rgrid'][i]) print '# of orbital", "are unsuitable for using FFTE.' print 'please check for condition:' print ' mod(num_rgrid(1),nprocs_rgrid(2))", "'ERROR.' sys.exit(1) # SALMON input list def to_boolean(s): if s.lower() == '\\'y\\'': return", "inputmap['nproc_k'] / nprocs_per_node sy = list_prod(inputmap['nproc_rgrid']) print '' print '# =============================================== #' print", "+ (npcol % 2) for ii in range(0,100): nprow = nprocs / npcol", "<SALMON inputfile> <required # of node> <required # of procs/node>'.format(sys.argv[0]) fail_exit() inputmap =", "{}x{}'.format(sx,sy) print 'NX = (nproc_ob * nproc_k) / # of process per node'", "either express or implied. 
# See the License for the specific language governing", "num_rgrid, is_ffte): if num_rgrid[0] % nprocs_per_node != 0: print '[INFO] num_rgrid[0] % nprocs_per_node", "= 1 for i in nlist: n = n * i return n", "'' print_inputlists(inputmap) print '' print '# =============================================== #' print 'If you use mesh-torus", "n): n = max(nb*npcol, n) n = min(n, (nb+1)*npcol) mb,nb = get_sl_blocking_factor(n, nprow,", "be 0' print ' mod(num_rgrid(2),nprocs_rgrid(3)) must be 0' print ' mod(num_rgrid(3),nprocs_rgrid(3)) must be", "== 0: ny = ii break return [nprocs_per_node, ny, nz] if __name__ ==", "= mb return mb,nb def get_nproc_rgrid(nprocs, nprocs_per_node, num_rgrid, is_ffte): if num_rgrid[0] % nprocs_per_node", "fail_exit(): print 'ERROR.' sys.exit(1) # SALMON input list def to_boolean(s): if s.lower() ==", "if not 'num_rgrid' in inputmap.keys(): dims = list_div(inputmap['al'], inputmap['dl']) inputmap['num_rgrid'] = [int(f) for", "% 2) for ii in range(0,100): nprow = nprocs / npcol if (nprow*npcol", "nproc_rgrid_pattern = re.compile(r'\\s*nproc_rgrid\\s*=\\s*\\d+') nstate_pattern = re.compile(r'\\s*nstate\\s*=\\s*\\d+') al_pattern = re.compile(r'\\s*al\\s*=\\s*\\d+') dl_pattern = re.compile(r'\\s*dl\\s*=\\s*\\d+') num_rgrid_pattern", "inputmap['nproc_ob'] = int(number_pattern.search(s).group()) elif nproc_rgrid_pattern.match(s): dims = [] for x in number_pattern.findall(s): dims.append(int(x))", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "j in range(0,17): if t % 3 == 0: t = t /", "0 def check_ffte_condition_prime_factors(num_rgrid): for i in range(0,3): t = num_rgrid[i] for j in", "(ny % 2) for ii in range(ny,1,-1): if check_ffte_condition_gridsize(num_rgrid, [nprocs_per_node, ii, nz]): if", "get_nproc_rgrid(nprocs_global, nprocs_per_node, num_rgrid, inputmap['yn_ffte']) nproc_ob = nprocs_global / list_prod(nproc_rgrid) nproc_k = 1 
inputmap['nproc_k']", "!= n): n = max(nb*npcol, n) n = min(n, (nb+1)*npcol) mb,nb = get_sl_blocking_factor(n,", "= min(n, (nb+1)*npcol) mb,nb = get_sl_blocking_factor(n, nprow, npcol) if (n != inputmap['nstate']): print", "/ list_prod(nproc_rgrid) nproc_k = 1 inputmap['nproc_k'] = nproc_k inputmap['nproc_ob'] = nproc_ob inputmap['nproc_rgrid'] =", "False else: return False def gen_inputlist_map(filename): inputmap = {} inputmap['nproc_k'] = 1 inputmap['nproc_ob']", "n = min(n, (nb+1)*npcol) mb,nb = get_sl_blocking_factor(n, nprow, npcol) if (n != inputmap['nstate']):", "nprow, npcol): k1 = (n+nprow-1)/nprow k2 = (n+npcol-1)/npcol mb = min(k1,k2) nb =", "{},{},{}'.format(*inputmap['num_rgrid']) print '/' # FFTE def check_ffte_condition_gridsize(num_rgrid, nprocs_rgrid): y1 = num_rgrid[0] % nprocs_rgrid[1]", "large scale system.' print 'please replace the following inputlists.' print '' print_inputlists(inputmap) print", "if (nb*npcol != n): n = max(nb*npcol, n) n = min(n, (nb+1)*npcol) mb,nb", "== 0 def check_ffte_condition_prime_factors(num_rgrid): for i in range(0,3): t = num_rgrid[i] for j", "% list_prod([nprocs_per_node, ii, nz]) == 0: ny = ii break return [nprocs_per_node, ny,", "[int(f) for f in dims] print '[INFO] num_rgrid constructed = {}'.format(inputmap['num_rgrid']) nnodes =", "= re.compile(r'\\s*nproc_rgrid\\s*=\\s*\\d+') nstate_pattern = re.compile(r'\\s*nstate\\s*=\\s*\\d+') al_pattern = re.compile(r'\\s*al\\s*=\\s*\\d+') dl_pattern = re.compile(r'\\s*dl\\s*=\\s*\\d+') num_rgrid_pattern =", "dims elif num_rgrid_pattern.match(s): dims = [] for x in number_pattern.findall(s): dims.append(int(x)) inputmap['num_rgrid'] =", "in number_pattern.findall(s): dims.append(int(x)) inputmap['num_rgrid'] = dims elif yn_ffte_pattern.match(s): inputmap['yn_ffte'] = to_boolean(yn_pattern.search(s).group()) elif yn_scalapack_pattern.match(s):", "== 0: t = t / 5 if t != 1: return False", "yn_pattern = re.compile(r'\\'[yYnN]\\'') string_pattern = 
re.compile(r'\\'\\w*\\'') theory_pattern = re.compile(r'\\s*theory\\s*=\\s*') nproc_k_pattern = re.compile(r'\\s*nproc_k\\s*=\\s*\\d+') nproc_ob_pattern", "2 for j in range(0,17): if t % 3 == 0: t =", "= (n+nprow-1)/nprow k2 = (n+npcol-1)/npcol mb = min(k1,k2) nb = mb return mb,nb", "the License. # You may obtain a copy of the License at #", "= re.compile(r'[+-]?(?:\\d+\\.?\\d*|\\.\\d+)(?:[dD][+-]?\\d+)?') yn_pattern = re.compile(r'\\'[yYnN]\\'') string_pattern = re.compile(r'\\'\\w*\\'') theory_pattern = re.compile(r'\\s*theory\\s*=\\s*') nproc_k_pattern =", "% nprocs_rgrid[1] y2 = num_rgrid[1] % nprocs_rgrid[1] z1 = num_rgrid[1] % nprocs_rgrid[2] z2", "get_sl_process_grid_size(n, nprocs): npcol = int(math.sqrt(float(nprocs))) npcol = npcol + (npcol % 2) for", "inputmap['process_allocation'] = 'orbital_sequential' # FFTE checking... if inputmap['yn_ffte']: if check_ffte_condition_gridsize(inputmap['num_rgrid'], inputmap['nproc_rgrid']): print '[FFTE]'", "shape is suitable to run the application.' print '' print 'NXxNY = {}x{}'.format(sx,sy)", "[nprocs_per_node, ny, nz] if __name__ == '__main__': if len(sys.argv) < 4: print '[Usage]", "% 5 == 0: t = t / 5 if t != 1:", "5 if t != 1: return False return True # ScaLAPACK def get_sl_process_grid_size(n,", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "following inputlists.' print '' print_inputlists(inputmap) print '' print '# =============================================== #' print 'If", "(npcol % 2) for ii in range(0,100): nprow = nprocs / npcol if", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "for i in nlist: n = n * i return n def fail_exit():", "num_rgrid[i] for j in range(0,26): if t % 2 == 0: t =", "'/' # FFTE def check_ffte_condition_gridsize(num_rgrid, nprocs_rgrid): y1 = num_rgrid[0] % nprocs_rgrid[1] y2 =", "{} does not support yet'.format(inputmap['theory']) # logical checking... 
if not 'num_rgrid' in inputmap.keys():", "be 0' print ' mod(num_rgrid(2),nprocs_rgrid(2)) must be 0' print ' mod(num_rgrid(2),nprocs_rgrid(3)) must be", "= dims elif dl_pattern.match(s): dims = [] for x in float_pattern.findall(s): dims.append(float(x.replace('d','e'))) inputmap['dl']", "the following node shape is suitable to run the application.' print '' print", "sys def list_div(l1,l2): n = min(len(l1),len(l2)) p = [] for i in range(0,n):", "developers # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "in number_pattern.findall(s): dims.append(int(x)) inputmap['nproc_rgrid'] = dims elif al_pattern.match(s): dims = [] for x", "'/' print '&system' print ' nstate = {}'.format(inputmap['nstate']) print '/' print '&rgrid' print", "inputmap['num_rgrid'] nproc_rgrid = get_nproc_rgrid(nprocs_global, nprocs_per_node, num_rgrid, inputmap['yn_ffte']) nproc_ob = nprocs_global / list_prod(nproc_rgrid) nproc_k", "#!/usr/bin/env python # # Copyright 2020 ARTED developers # # Licensed under the", "nprocs): print '[ScaLAPACK] nprow*npcol = {} != {}'.format(nprow*npcol,nprocs) print ' please check the", "be 0' print ' mod(num_rgrid(3),nprocs_rgrid(3)) must be 0' fail_exit() # ScaLAPACK checking... 
if", "re.compile(r'\\s*dl\\s*=\\s*\\d+') num_rgrid_pattern = re.compile(r'\\s*num_rgrid\\s*=\\s*\\d+') yn_ffte_pattern = re.compile(r'\\s*yn_ffte\\s*=\\s*') yn_scalapack_pattern= re.compile(r'\\s*yn_scalapack\\s*=\\s*') proc_alloc_pattern = re.compile(r'\\s*process_allocation\\s*=\\s*') for", "# FFTE def check_ffte_condition_gridsize(num_rgrid, nprocs_rgrid): y1 = num_rgrid[0] % nprocs_rgrid[1] y2 = num_rgrid[1]", "= {},{},{}'.format(*inputmap['num_rgrid']) print '/' # FFTE def check_ffte_condition_gridsize(num_rgrid, nprocs_rgrid): y1 = num_rgrid[0] %", "print '/' print '&system' print ' nstate = {}'.format(inputmap['nstate']) print '/' print '&rgrid'", "number_pattern.findall(s): dims.append(int(x)) inputmap['num_rgrid'] = dims elif yn_ffte_pattern.match(s): inputmap['yn_ffte'] = to_boolean(yn_pattern.search(s).group()) elif yn_scalapack_pattern.match(s): inputmap['yn_scalapack']", "5' else: print '[FFTE]' print 'num_rgrid and nproc_rgrid are unsuitable for using FFTE.'", "i in range(0,3): t = num_rgrid[i] for j in range(0,26): if t %", "def get_nproc_rgrid(nprocs, nprocs_per_node, num_rgrid, is_ffte): if num_rgrid[0] % nprocs_per_node != 0: print '[INFO]", "= to_boolean(yn_pattern.search(s).group()) elif proc_alloc_pattern.match(s): inputmap['process_allocation'] = string_pattern.search(s).group().strip('\\'').lower() return inputmap def print_inputlists(inputmap): print '&parallel'", "= nprocs_global / list_prod(nproc_rgrid) nproc_k = 1 inputmap['nproc_k'] = nproc_k inputmap['nproc_ob'] = nproc_ob", "use FFTE.' if not check_ffte_condition_prime_factors(inputmap['num_rgrid']): print ' prime factors for number of grids", "npcol = int(math.sqrt(float(nprocs))) npcol = npcol + (npcol % 2) for ii in", "'num_rgrid and nproc_rgrid are available to use FFTE.' 
if not check_ffte_condition_prime_factors(inputmap['num_rgrid']): print '", "({},{})'.format(nprow,npcol) print 'blocking factor (row,col) = ({},{})'.format(mb,nb) print '[INFO]' print 'wave function size", "must be 0' fail_exit() # ScaLAPACK checking... if inputmap['yn_scalapack']: n = inputmap['nstate'] nprocs", "print 'please check for condition:' print ' mod(num_rgrid(1),nprocs_rgrid(2)) must be 0' print '", "num_rgrid, inputmap['yn_ffte']) nproc_ob = nprocs_global / list_prod(nproc_rgrid) nproc_k = 1 inputmap['nproc_k'] = nproc_k", "int(math.sqrt(float(nprocs))) npcol = npcol + (npcol % 2) for ii in range(0,100): nprow", "break nz = nz + 2 if is_ffte: ny = ny + (ny", "= {} != {}'.format(nprow*npcol,nprocs) print ' please check the nproc_ob' fail_exit() mb,nb =", "python # # Copyright 2020 ARTED developers # # Licensed under the Apache", "nproc_ob * nproc_k /= # of MPI procs = {}'.format(nprocs_global) print 'calculate nproc_k,ob", "print 'NX = (nproc_ob * nproc_k) / # of process per node' print", "Copyright 2020 ARTED developers # # Licensed under the Apache License, Version 2.0", "limitations under the License. # import math import re import sys def list_div(l1,l2):", "to_boolean(s): if s.lower() == '\\'y\\'': return True elif s.lower() == '\\'n\\'': return False", "range(0,100): ny = nzy / nz if (ny*nz == nzy): break nz =", "nprow = nprocs / npcol if (nprow*npcol == nprocs): break npcol = npcol", "application.' 
print '' print 'NXxNY = {}x{}'.format(sx,sy) print 'NX = (nproc_ob * nproc_k)", "per MPI process' for i in range(0,3): print 'num_rgrid[{}] / nproc_rgrid[{}] = {}'.format(i,i,float(inputmap['num_rgrid'][i])/inputmap['nproc_rgrid'][i])", "/ nproc_rgrid[{}] = {}'.format(i,i,float(inputmap['num_rgrid'][i])/inputmap['nproc_rgrid'][i]) print '# of orbital = {}'.format(float(inputmap['nstate'])/inputmap['nproc_ob']) sx = inputmap['nproc_ob']", "= num_rgrid[1] % nprocs_rgrid[2] z2 = num_rgrid[2] % nprocs_rgrid[2] return y1 == 0", "= re.compile(r'\\'[yYnN]\\'') string_pattern = re.compile(r'\\'\\w*\\'') theory_pattern = re.compile(r'\\s*theory\\s*=\\s*') nproc_k_pattern = re.compile(r'\\s*nproc_k\\s*=\\s*\\d+') nproc_ob_pattern =", "with the License. # You may obtain a copy of the License at", "= re.compile(r'\\'\\w*\\'') theory_pattern = re.compile(r'\\s*theory\\s*=\\s*') nproc_k_pattern = re.compile(r'\\s*nproc_k\\s*=\\s*\\d+') nproc_ob_pattern = re.compile(r'\\s*nproc_ob\\s*=\\s*\\d+') nproc_rgrid_pattern =", "'&system' print ' nstate = {}'.format(inputmap['nstate']) print '/' print '&rgrid' print ' num_rgrid", "(n+npcol-1)/npcol mb = min(k1,k2) nb = mb return mb,nb def get_nproc_rgrid(nprocs, nprocs_per_node, num_rgrid,", "inputmap['theory'] != 'dft': print 'Theory {} does not support yet'.format(inputmap['theory']) # logical checking...", "f in dims] print '[INFO] num_rgrid constructed = {}'.format(inputmap['num_rgrid']) nnodes = int(sys.argv[2]) nprocs_per_node", "FFTE.' if not check_ffte_condition_prime_factors(inputmap['num_rgrid']): print ' prime factors for number of grids must", "0 and y2 == 0 and z1 == 0 and z2 == 0", "'orbital_sequential' # FFTE checking... 
if inputmap['yn_ffte']: if check_ffte_condition_gridsize(inputmap['num_rgrid'], inputmap['nproc_rgrid']): print '[FFTE]' print 'num_rgrid", "system,' print 'probably, the following node shape is suitable to run the application.'", "print ' mod(num_rgrid(2),nprocs_rgrid(2)) must be 0' print ' mod(num_rgrid(2),nprocs_rgrid(3)) must be 0' print", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "# of node> <required # of procs/node>'.format(sys.argv[0]) fail_exit() inputmap = gen_inputlist_map(sys.argv[1]) if inputmap['theory']", "# of MPI procs = {}'.format(nprocs_global) print 'calculate nproc_k,ob and rgrid' # find", "for f in dims] print '[INFO] num_rgrid constructed = {}'.format(inputmap['num_rgrid']) nnodes = int(sys.argv[2])", "1 inputmap['nproc_rgrid'] = [1,1,1] inputmap['yn_ffte'] = False inputmap['yn_scalapack'] = False inputmap['process_allocation'] = 'grid_sequential'", "!= {}'.format(nprow*npcol,nprocs) print ' please check the nproc_ob' fail_exit() mb,nb = get_sl_blocking_factor(n, nprow,", "parameters for large scale system.' print 'please replace the following inputlists.' 
print ''", "mb,nb def get_nproc_rgrid(nprocs, nprocs_per_node, num_rgrid, is_ffte): if num_rgrid[0] % nprocs_per_node != 0: print", "grids must be combination of 2,3, or 5' else: print '[FFTE]' print 'num_rgrid", "elif num_rgrid_pattern.match(s): dims = [] for x in number_pattern.findall(s): dims.append(int(x)) inputmap['num_rgrid'] = dims", "max(nb*npcol, n) n = min(n, (nb+1)*npcol) mb,nb = get_sl_blocking_factor(n, nprow, npcol) if (n", "inputmap['nstate'] = int(number_pattern.search(s).group()) elif nproc_k_pattern.match(s): inputmap['nproc_k'] = int(number_pattern.search(s).group()) elif nproc_ob_pattern.match(s): inputmap['nproc_ob'] = int(number_pattern.search(s).group())", "' mod(num_rgrid(1),nprocs_rgrid(2)) must be 0' print ' mod(num_rgrid(2),nprocs_rgrid(2)) must be 0' print '", "process' for i in range(0,3): print 'num_rgrid[{}] / nproc_rgrid[{}] = {}'.format(i,i,float(inputmap['num_rgrid'][i])/inputmap['nproc_rgrid'][i]) print '#", "'num_rgrid' in inputmap.keys(): dims = list_div(inputmap['al'], inputmap['dl']) inputmap['num_rgrid'] = [int(f) for f in", "int(math.sqrt(float(nzy))) nz = nz + (nz % 2) for ii in range(0,100): ny", "# ScaLAPACK checking... 
if inputmap['yn_scalapack']: n = inputmap['nstate'] nprocs = inputmap['nproc_ob'] nprow,npcol =", "'' print '# =============================================== #' print 'Probably suitable parameters for large scale system.'", "[] for x in float_pattern.findall(s): dims.append(float(x.replace('d','e'))) inputmap['al'] = dims elif dl_pattern.match(s): dims =", "' nproc_k = {}'.format(inputmap['nproc_k']) print ' nproc_ob = {}'.format(inputmap['nproc_ob']) print ' nproc_rgrid =", "law or agreed to in writing, software # distributed under the License is", "if not check_ffte_condition_prime_factors(inputmap['num_rgrid']): print ' prime factors for number of grids must be", "the License for the specific language governing permissions and # limitations under the", "def gen_inputlist_map(filename): inputmap = {} inputmap['nproc_k'] = 1 inputmap['nproc_ob'] = 1 inputmap['nproc_rgrid'] =", "factors for number of grids must be combination of 2,3, or 5' else:", "n def fail_exit(): print 'ERROR.' sys.exit(1) # SALMON input list def to_boolean(s): if", "mb,nb = get_sl_blocking_factor(n, nprow, npcol) if (nb*npcol != n): n = max(nb*npcol, n)", "inputlists.' print '' print_inputlists(inputmap) print '' print '# =============================================== #' print 'If you", "print '[FFTE]' print 'num_rgrid and nproc_rgrid are unsuitable for using FFTE.' print 'please", "print '[ScaLAPACK]' print 'nstate should be changed from {} to {}'.format(inputmap['nstate'],n) inputmap['nstate'] =", "run the application.' 
print '' print 'NXxNY = {}x{}'.format(sx,sy) print 'NX = (nproc_ob", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "print_inputlists(inputmap): print '&parallel' print ' nproc_k = {}'.format(inputmap['nproc_k']) print ' nproc_ob = {}'.format(inputmap['nproc_ob'])", "MPI procs = {}'.format(nprocs_global) print 'calculate nproc_k,ob and rgrid' # find nproc_.* num_rgrid", "for number of grids must be combination of 2,3, or 5' else: print", "inputmap['process_allocation'] = string_pattern.search(s).group().strip('\\'').lower() return inputmap def print_inputlists(inputmap): print '&parallel' print ' nproc_k =", "'[ScaLAPACK] nprow*npcol = {} != {}'.format(nprow*npcol,nprocs) print ' please check the nproc_ob' fail_exit()", "changed from {} to {}'.format(inputmap['nstate'],n) inputmap['nstate'] = n print '[ScaLAPACK]' print 'process grid", "j in range(0,26): if t % 2 == 0: t = t /", "+ 2 if is_ffte: ny = ny + (ny % 2) for ii", "y1 == 0 and y2 == 0 and z1 == 0 and z2", "print '' print_inputlists(inputmap) print '' print '# =============================================== #' print 'If you use", "must be 0' print ' mod(num_rgrid(2),nprocs_rgrid(2)) must be 0' print ' mod(num_rgrid(2),nprocs_rgrid(3)) must", "num_rgrid[0] % nprocs_per_node is not divided.' nzy = nprocs / nprocs_per_node nz =", "+ 2 return nprow,npcol def get_sl_blocking_factor(n, nprow, npcol): k1 = (n+nprow-1)/nprow k2 =", "(nproc_ob * nproc_k) / # of process per node' print 'NY = product(nproc_rgrid)'", "replace the following inputlists.' 
print '' print_inputlists(inputmap) print '' print '# =============================================== #'", "= (nproc_ob * nproc_k) / # of process per node' print 'NY =", "i in nlist: n = n * i return n def fail_exit(): print", "num_rgrid[1] % nprocs_rgrid[2] z2 = num_rgrid[2] % nprocs_rgrid[2] return y1 == 0 and", "for x in number_pattern.findall(s): dims.append(int(x)) inputmap['nproc_rgrid'] = dims elif al_pattern.match(s): dims = []", "' nproc_rgrid = {},{},{}'.format(*inputmap['nproc_rgrid']) print ' process_allocation = \\'{}\\''.format(inputmap['process_allocation']) print '/' print '&system'", "nprocs_rgrid[2] z2 = num_rgrid[2] % nprocs_rgrid[2] return y1 == 0 and y2 ==", "nprocs / npcol if (nprow*npcol == nprocs): break npcol = npcol + 2", "inputmap['al'] = dims elif dl_pattern.match(s): dims = [] for x in float_pattern.findall(s): dims.append(float(x.replace('d','e')))", "for j in range(0,11): if t % 5 == 0: t = t", "rgrid' # find nproc_.* num_rgrid = inputmap['num_rgrid'] nproc_rgrid = get_nproc_rgrid(nprocs_global, nprocs_per_node, num_rgrid, inputmap['yn_ffte'])", "elif yn_scalapack_pattern.match(s): inputmap['yn_scalapack'] = to_boolean(yn_pattern.search(s).group()) elif proc_alloc_pattern.match(s): inputmap['process_allocation'] = string_pattern.search(s).group().strip('\\'').lower() return inputmap def", "in compliance with the License. 
# You may obtain a copy of the", "= \\'{}\\''.format(inputmap['process_allocation']) print '/' print '&system' print ' nstate = {}'.format(inputmap['nstate']) print '/'", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "number_pattern = re.compile(r'\\d+') float_pattern = re.compile(r'[+-]?(?:\\d+\\.?\\d*|\\.\\d+)(?:[dD][+-]?\\d+)?') yn_pattern = re.compile(r'\\'[yYnN]\\'') string_pattern = re.compile(r'\\'\\w*\\'') theory_pattern", "must be 0' print ' mod(num_rgrid(3),nprocs_rgrid(3)) must be 0' fail_exit() # ScaLAPACK checking...", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "open(filename) as f: number_pattern = re.compile(r'\\d+') float_pattern = re.compile(r'[+-]?(?:\\d+\\.?\\d*|\\.\\d+)(?:[dD][+-]?\\d+)?') yn_pattern = re.compile(r'\\'[yYnN]\\'') string_pattern", "elif dl_pattern.match(s): dims = [] for x in float_pattern.findall(s): dims.append(float(x.replace('d','e'))) inputmap['dl'] = dims", "inputmap['num_rgrid'] = dims elif yn_ffte_pattern.match(s): inputmap['yn_ffte'] = to_boolean(yn_pattern.search(s).group()) elif yn_scalapack_pattern.match(s): inputmap['yn_scalapack'] = to_boolean(yn_pattern.search(s).group())", "nprocs_per_node is not divided.' 
nzy = nprocs / nprocs_per_node nz = int(math.sqrt(float(nzy))) nz", "2) for ii in range(0,100): ny = nzy / nz if (ny*nz ==", "dims = [] for x in float_pattern.findall(s): dims.append(float(x.replace('d','e'))) inputmap['al'] = dims elif dl_pattern.match(s):", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "= ny + (ny % 2) for ii in range(ny,1,-1): if check_ffte_condition_gridsize(num_rgrid, [nprocs_per_node,", "procs/node>'.format(sys.argv[0]) fail_exit() inputmap = gen_inputlist_map(sys.argv[1]) if inputmap['theory'] != 'dft': print 'Theory {} does", "num_rgrid constructed = {}'.format(inputmap['num_rgrid']) nnodes = int(sys.argv[2]) nprocs_per_node = int(sys.argv[3]) nprocs_global = nnodes", "elif proc_alloc_pattern.match(s): inputmap['process_allocation'] = string_pattern.search(s).group().strip('\\'').lower() return inputmap def print_inputlists(inputmap): print '&parallel' print '", "print '/' print '&rgrid' print ' num_rgrid = {},{},{}'.format(*inputmap['num_rgrid']) print '/' # FFTE", "not check_ffte_condition_prime_factors(inputmap['num_rgrid']): print ' prime factors for number of grids must be combination", "for using FFTE.' print 'please check for condition:' print ' mod(num_rgrid(1),nprocs_rgrid(2)) must be", "ii in range(0,100): ny = nzy / nz if (ny*nz == nzy): break", "print 'probably, the following node shape is suitable to run the application.' 
print", "!= nprocs_global: print '[INFO]' print 'product(nproc_rgrid) * nproc_ob * nproc_k /= # of", "in range(0,100): ny = nzy / nz if (ny*nz == nzy): break nz", "range(0,17): if t % 3 == 0: t = t / 3 for", "See the License for the specific language governing permissions and # limitations under", "[nprocs_per_node, ii, nz]): if nprocs % list_prod([nprocs_per_node, ii, nz]) == 0: ny =", "print 'product(nproc_rgrid) * nproc_ob * nproc_k /= # of MPI procs = {}'.format(nprocs_global)", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "= 1 inputmap['nproc_ob'] = 1 inputmap['nproc_rgrid'] = [1,1,1] inputmap['yn_ffte'] = False inputmap['yn_scalapack'] =", "check_ffte_condition_prime_factors(inputmap['num_rgrid']): print ' prime factors for number of grids must be combination of", "ScaLAPACK def get_sl_process_grid_size(n, nprocs): npcol = int(math.sqrt(float(nprocs))) npcol = npcol + (npcol %", "License. # import math import re import sys def list_div(l1,l2): n = min(len(l1),len(l2))", "dims = [] for x in number_pattern.findall(s): dims.append(int(x)) inputmap['nproc_rgrid'] = dims elif al_pattern.match(s):", "print ' prime factors for number of grids must be combination of 2,3,", "nz]): if nprocs % list_prod([nprocs_per_node, ii, nz]) == 0: ny = ii break", "inputmap['nproc_ob'] nprow,npcol = get_sl_process_grid_size(n, nprocs) if (nprow*npcol != nprocs): print '[ScaLAPACK] nprow*npcol =", "print '[ScaLAPACK]' print 'process grid map (row,col) = ({},{})'.format(nprow,npcol) print 'blocking factor (row,col)", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "elif nproc_k_pattern.match(s): inputmap['nproc_k'] = int(number_pattern.search(s).group()) elif nproc_ob_pattern.match(s): inputmap['nproc_ob'] = int(number_pattern.search(s).group()) elif nproc_rgrid_pattern.match(s): dims", "nb = mb return mb,nb def get_nproc_rgrid(nprocs, nprocs_per_node, num_rgrid, is_ffte): if num_rgrid[0] %", "!= 
1: return False return True # ScaLAPACK def get_sl_process_grid_size(n, nprocs): npcol =", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "gen_inputlist_map(sys.argv[1]) if inputmap['theory'] != 'dft': print 'Theory {} does not support yet'.format(inputmap['theory']) #", "float_pattern = re.compile(r'[+-]?(?:\\d+\\.?\\d*|\\.\\d+)(?:[dD][+-]?\\d+)?') yn_pattern = re.compile(r'\\'[yYnN]\\'') string_pattern = re.compile(r'\\'\\w*\\'') theory_pattern = re.compile(r'\\s*theory\\s*=\\s*') nproc_k_pattern", "nprocs_global: print '[INFO]' print 'product(nproc_rgrid) * nproc_ob * nproc_k /= # of MPI", "return p def list_prod(nlist): n = 1 for i in nlist: n =", "= re.compile(r'\\s*nproc_k\\s*=\\s*\\d+') nproc_ob_pattern = re.compile(r'\\s*nproc_ob\\s*=\\s*\\d+') nproc_rgrid_pattern = re.compile(r'\\s*nproc_rgrid\\s*=\\s*\\d+') nstate_pattern = re.compile(r'\\s*nstate\\s*=\\s*\\d+') al_pattern =", "'# of orbital = {}'.format(float(inputmap['nstate'])/inputmap['nproc_ob']) sx = inputmap['nproc_ob'] * inputmap['nproc_k'] / nprocs_per_node sy", "inputmap['nproc_rgrid'] = nproc_rgrid if nproc_ob >= nprocs_per_node: inputmap['process_allocation'] = 'orbital_sequential' # FFTE checking...", "inputmap['nproc_ob'] * inputmap['nproc_k'] / nprocs_per_node sy = list_prod(inputmap['nproc_rgrid']) print '' print '# ===============================================", "npcol + (npcol % 2) for ii in range(0,100): nprow = nprocs /", "re.compile(r'\\s*yn_ffte\\s*=\\s*') yn_scalapack_pattern= re.compile(r'\\s*yn_scalapack\\s*=\\s*') proc_alloc_pattern = re.compile(r'\\s*process_allocation\\s*=\\s*') for s in f: if theory_pattern.match(s): inputmap['theory']", "int(sys.argv[3]) nprocs_global = nnodes * nprocs_per_node if list_prod(inputmap['nproc_rgrid']) * inputmap['nproc_ob'] * inputmap['nproc_k'] !=", "npcol) if (n != inputmap['nstate']): print '[ScaLAPACK]' print 'nstate should be changed from", "= dims elif num_rgrid_pattern.match(s): 
dims = [] for x in number_pattern.findall(s): dims.append(int(x)) inputmap['num_rgrid']", "if t % 5 == 0: t = t / 5 if t", "are available to use FFTE.' if not check_ffte_condition_prime_factors(inputmap['num_rgrid']): print ' prime factors for", "using FFTE.' print 'please check for condition:' print ' mod(num_rgrid(1),nprocs_rgrid(2)) must be 0'", "int(sys.argv[2]) nprocs_per_node = int(sys.argv[3]) nprocs_global = nnodes * nprocs_per_node if list_prod(inputmap['nproc_rgrid']) * inputmap['nproc_ob']", "return True elif s.lower() == '\\'n\\'': return False else: return False def gen_inputlist_map(filename):", "list_prod(nlist): n = 1 for i in nlist: n = n * i", "ny = nzy / nz if (ny*nz == nzy): break nz = nz", "orbital = {}'.format(float(inputmap['nstate'])/inputmap['nproc_ob']) sx = inputmap['nproc_ob'] * inputmap['nproc_k'] / nprocs_per_node sy = list_prod(inputmap['nproc_rgrid'])", "1 for i in nlist: n = n * i return n def", "the specific language governing permissions and # limitations under the License. # import", "= get_sl_process_grid_size(n, nprocs) if (nprow*npcol != nprocs): print '[ScaLAPACK] nprow*npcol = {} !=", "= min(len(l1),len(l2)) p = [] for i in range(0,n): p.append(l1[i] / l2[i]) return", "inputmap['yn_scalapack'] = False inputmap['process_allocation'] = 'grid_sequential' with open(filename) as f: number_pattern = re.compile(r'\\d+')", "print 'nstate should be changed from {} to {}'.format(inputmap['nstate'],n) inputmap['nstate'] = n print", "'num_rgrid[{}] / nproc_rgrid[{}] = {}'.format(i,i,float(inputmap['num_rgrid'][i])/inputmap['nproc_rgrid'][i]) print '# of orbital = {}'.format(float(inputmap['nstate'])/inputmap['nproc_ob']) sx =", "{} inputmap['nproc_k'] = 1 inputmap['nproc_ob'] = 1 inputmap['nproc_rgrid'] = [1,1,1] inputmap['yn_ffte'] = False", "# logical checking... 
if not 'num_rgrid' in inputmap.keys(): dims = list_div(inputmap['al'], inputmap['dl']) inputmap['num_rgrid']", "def print_inputlists(inputmap): print '&parallel' print ' nproc_k = {}'.format(inputmap['nproc_k']) print ' nproc_ob =", "of 2,3, or 5' else: print '[FFTE]' print 'num_rgrid and nproc_rgrid are unsuitable", "/= # of MPI procs = {}'.format(nprocs_global) print 'calculate nproc_k,ob and rgrid' #", "nproc_rgrid are unsuitable for using FFTE.' print 'please check for condition:' print '", "al_pattern.match(s): dims = [] for x in float_pattern.findall(s): dims.append(float(x.replace('d','e'))) inputmap['al'] = dims elif", "'NXxNY = {}x{}'.format(sx,sy) print 'NX = (nproc_ob * nproc_k) / # of process", "3 for j in range(0,11): if t % 5 == 0: t =", "'[ScaLAPACK]' print 'process grid map (row,col) = ({},{})'.format(nprow,npcol) print 'blocking factor (row,col) =", "mod(num_rgrid(2),nprocs_rgrid(2)) must be 0' print ' mod(num_rgrid(2),nprocs_rgrid(3)) must be 0' print ' mod(num_rgrid(3),nprocs_rgrid(3))", "in range(0,n): p.append(l1[i] / l2[i]) return p def list_prod(nlist): n = 1 for", "ii, nz]) == 0: ny = ii break return [nprocs_per_node, ny, nz] if", "float_pattern.findall(s): dims.append(float(x.replace('d','e'))) inputmap['dl'] = dims elif num_rgrid_pattern.match(s): dims = [] for x in", "'[INFO] num_rgrid[0] % nprocs_per_node is not divided.' nzy = nprocs / nprocs_per_node nz", "under the License. 
# import math import re import sys def list_div(l1,l2): n", "return True # ScaLAPACK def get_sl_process_grid_size(n, nprocs): npcol = int(math.sqrt(float(nprocs))) npcol = npcol", "= {}'.format(i,i,float(inputmap['num_rgrid'][i])/inputmap['nproc_rgrid'][i]) print '# of orbital = {}'.format(float(inputmap['nstate'])/inputmap['nproc_ob']) sx = inputmap['nproc_ob'] * inputmap['nproc_k']", "nprocs_rgrid[1] y2 = num_rgrid[1] % nprocs_rgrid[1] z1 = num_rgrid[1] % nprocs_rgrid[2] z2 =", "map (row,col) = ({},{})'.format(nprow,npcol) print 'blocking factor (row,col) = ({},{})'.format(mb,nb) print '[INFO]' print", "x in number_pattern.findall(s): dims.append(int(x)) inputmap['num_rgrid'] = dims elif yn_ffte_pattern.match(s): inputmap['yn_ffte'] = to_boolean(yn_pattern.search(s).group()) elif", "Version 2.0 (the \"License\"); # you may not use this file except in", "except in compliance with the License. # You may obtain a copy of", "def fail_exit(): print 'ERROR.' sys.exit(1) # SALMON input list def to_boolean(s): if s.lower()", "#' print 'If you use mesh-torus network type system,' print 'probably, the following", "ny, nz] if __name__ == '__main__': if len(sys.argv) < 4: print '[Usage] ./{}", "check_ffte_condition_gridsize(num_rgrid, nprocs_rgrid): y1 = num_rgrid[0] % nprocs_rgrid[1] y2 = num_rgrid[1] % nprocs_rgrid[1] z1", "= 1 inputmap['nproc_k'] = nproc_k inputmap['nproc_ob'] = nproc_ob inputmap['nproc_rgrid'] = nproc_rgrid if nproc_ob", "print '' print 'NXxNY = {}x{}'.format(sx,sy) print 'NX = (nproc_ob * nproc_k) /", "{}'.format(nprocs_global) print 'calculate nproc_k,ob and rgrid' # find nproc_.* num_rgrid = inputmap['num_rgrid'] nproc_rgrid", "nzy = nprocs / nprocs_per_node nz = int(math.sqrt(float(nzy))) nz = nz + (nz", "for x in number_pattern.findall(s): dims.append(int(x)) inputmap['num_rgrid'] = dims elif yn_ffte_pattern.match(s): inputmap['yn_ffte'] = to_boolean(yn_pattern.search(s).group())", "= nz + (nz % 2) for ii in range(0,100): ny = nzy", "for i in 
range(0,n): p.append(l1[i] / l2[i]) return p def list_prod(nlist): n =", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "in range(0,3): print 'num_rgrid[{}] / nproc_rgrid[{}] = {}'.format(i,i,float(inputmap['num_rgrid'][i])/inputmap['nproc_rgrid'][i]) print '# of orbital =", "nprow,npcol def get_sl_blocking_factor(n, nprow, npcol): k1 = (n+nprow-1)/nprow k2 = (n+npcol-1)/npcol mb =", "and z1 == 0 and z2 == 0 def check_ffte_condition_prime_factors(num_rgrid): for i in", "print ' mod(num_rgrid(3),nprocs_rgrid(3)) must be 0' fail_exit() # ScaLAPACK checking... if inputmap['yn_scalapack']: n", "check_ffte_condition_gridsize(num_rgrid, [nprocs_per_node, ii, nz]): if nprocs % list_prod([nprocs_per_node, ii, nz]) == 0: ny", "min(n, (nb+1)*npcol) mb,nb = get_sl_blocking_factor(n, nprow, npcol) if (n != inputmap['nstate']): print '[ScaLAPACK]'", "p.append(l1[i] / l2[i]) return p def list_prod(nlist): n = 1 for i in", "= ({},{})'.format(mb,nb) print '[INFO]' print 'wave function size per MPI process' for i", "== 0: t = t / 2 for j in range(0,17): if t", "'[ScaLAPACK]' print 'nstate should be changed from {} to {}'.format(inputmap['nstate'],n) inputmap['nstate'] = n", "and z2 == 0 def check_ffte_condition_prime_factors(num_rgrid): for i in range(0,3): t = num_rgrid[i]", "k1 = (n+nprow-1)/nprow k2 = (n+npcol-1)/npcol mb = min(k1,k2) nb = mb return", "/ l2[i]) return p def list_prod(nlist): n = 1 for i in nlist:", "nprocs_per_node nz = int(math.sqrt(float(nzy))) nz = nz + (nz % 2) for ii", "nz = int(math.sqrt(float(nzy))) nz = nz + (nz % 2) for ii in", "grid map (row,col) = ({},{})'.format(nprow,npcol) print 'blocking factor (row,col) = ({},{})'.format(mb,nb) print '[INFO]'", "npcol = npcol + 2 return nprow,npcol def get_sl_blocking_factor(n, nprow, npcol): k1 =", "mb = min(k1,k2) 
nb = mb return mb,nb def get_nproc_rgrid(nprocs, nprocs_per_node, num_rgrid, is_ffte):", "language governing permissions and # limitations under the License. # import math import", "t % 3 == 0: t = t / 3 for j in", "min(k1,k2) nb = mb return mb,nb def get_nproc_rgrid(nprocs, nprocs_per_node, num_rgrid, is_ffte): if num_rgrid[0]", "dims.append(float(x.replace('d','e'))) inputmap['al'] = dims elif dl_pattern.match(s): dims = [] for x in float_pattern.findall(s):", "print ' process_allocation = \\'{}\\''.format(inputmap['process_allocation']) print '/' print '&system' print ' nstate =", "p = [] for i in range(0,n): p.append(l1[i] / l2[i]) return p def", "False def gen_inputlist_map(filename): inputmap = {} inputmap['nproc_k'] = 1 inputmap['nproc_ob'] = 1 inputmap['nproc_rgrid']", "print 'ERROR.' sys.exit(1) # SALMON input list def to_boolean(s): if s.lower() == '\\'y\\'':", "s in f: if theory_pattern.match(s): inputmap['theory'] = string_pattern.search(s).group().strip('\\'').lower() elif nstate_pattern.match(s): inputmap['nstate'] = int(number_pattern.search(s).group())", "0: t = t / 5 if t != 1: return False return", "proc_alloc_pattern.match(s): inputmap['process_allocation'] = string_pattern.search(s).group().strip('\\'').lower() return inputmap def print_inputlists(inputmap): print '&parallel' print ' nproc_k", "2 == 0: t = t / 2 for j in range(0,17): if", "= t / 5 if t != 1: return False return True #", "= {}'.format(inputmap['nstate']) print '/' print '&rgrid' print ' num_rgrid = {},{},{}'.format(*inputmap['num_rgrid']) print '/'", "nproc_k inputmap['nproc_ob'] = nproc_ob inputmap['nproc_rgrid'] = nproc_rgrid if nproc_ob >= nprocs_per_node: inputmap['process_allocation'] =", "ScaLAPACK checking... if inputmap['yn_scalapack']: n = inputmap['nstate'] nprocs = inputmap['nproc_ob'] nprow,npcol = get_sl_process_grid_size(n,", "and nproc_rgrid are available to use FFTE.' 
if not check_ffte_condition_prime_factors(inputmap['num_rgrid']): print ' prime", "nproc_rgrid if nproc_ob >= nprocs_per_node: inputmap['process_allocation'] = 'orbital_sequential' # FFTE checking... if inputmap['yn_ffte']:", "in range(0,100): nprow = nprocs / npcol if (nprow*npcol == nprocs): break npcol", "inputmap['process_allocation'] = 'grid_sequential' with open(filename) as f: number_pattern = re.compile(r'\\d+') float_pattern = re.compile(r'[+-]?(?:\\d+\\.?\\d*|\\.\\d+)(?:[dD][+-]?\\d+)?')", "int(number_pattern.search(s).group()) elif nproc_k_pattern.match(s): inputmap['nproc_k'] = int(number_pattern.search(s).group()) elif nproc_ob_pattern.match(s): inputmap['nproc_ob'] = int(number_pattern.search(s).group()) elif nproc_rgrid_pattern.match(s):", "print '# of orbital = {}'.format(float(inputmap['nstate'])/inputmap['nproc_ob']) sx = inputmap['nproc_ob'] * inputmap['nproc_k'] / nprocs_per_node", "j in range(0,11): if t % 5 == 0: t = t /", "print '' print '# =============================================== #' print 'If you use mesh-torus network type", "% nprocs_rgrid[2] return y1 == 0 and y2 == 0 and z1 ==", "' mod(num_rgrid(2),nprocs_rgrid(3)) must be 0' print ' mod(num_rgrid(3),nprocs_rgrid(3)) must be 0' fail_exit() #", "x in float_pattern.findall(s): dims.append(float(x.replace('d','e'))) inputmap['al'] = dims elif dl_pattern.match(s): dims = [] for", "= {}'.format(inputmap['nproc_ob']) print ' nproc_rgrid = {},{},{}'.format(*inputmap['nproc_rgrid']) print ' process_allocation = \\'{}\\''.format(inputmap['process_allocation']) print", "nproc_.* num_rgrid = inputmap['num_rgrid'] nproc_rgrid = get_nproc_rgrid(nprocs_global, nprocs_per_node, num_rgrid, inputmap['yn_ffte']) nproc_ob = nprocs_global", "= re.compile(r'\\s*num_rgrid\\s*=\\s*\\d+') yn_ffte_pattern = re.compile(r'\\s*yn_ffte\\s*=\\s*') yn_scalapack_pattern= re.compile(r'\\s*yn_scalapack\\s*=\\s*') proc_alloc_pattern = re.compile(r'\\s*process_allocation\\s*=\\s*') for s in", "prime factors for 
number of grids must be combination of 2,3, or 5'", "mod(num_rgrid(1),nprocs_rgrid(2)) must be 0' print ' mod(num_rgrid(2),nprocs_rgrid(2)) must be 0' print ' mod(num_rgrid(2),nprocs_rgrid(3))", "re.compile(r'\\s*theory\\s*=\\s*') nproc_k_pattern = re.compile(r'\\s*nproc_k\\s*=\\s*\\d+') nproc_ob_pattern = re.compile(r'\\s*nproc_ob\\s*=\\s*\\d+') nproc_rgrid_pattern = re.compile(r'\\s*nproc_rgrid\\s*=\\s*\\d+') nstate_pattern = re.compile(r'\\s*nstate\\s*=\\s*\\d+')", "0: t = t / 2 for j in range(0,17): if t %", "len(sys.argv) < 4: print '[Usage] ./{} <SALMON inputfile> <required # of node> <required", "print 'please replace the following inputlists.' print '' print_inputlists(inputmap) print '' print '#", "for s in f: if theory_pattern.match(s): inputmap['theory'] = string_pattern.search(s).group().strip('\\'').lower() elif nstate_pattern.match(s): inputmap['nstate'] =", "= [] for x in float_pattern.findall(s): dims.append(float(x.replace('d','e'))) inputmap['al'] = dims elif dl_pattern.match(s): dims", "y2 = num_rgrid[1] % nprocs_rgrid[1] z1 = num_rgrid[1] % nprocs_rgrid[2] z2 = num_rgrid[2]", "if __name__ == '__main__': if len(sys.argv) < 4: print '[Usage] ./{} <SALMON inputfile>", "print ' mod(num_rgrid(1),nprocs_rgrid(2)) must be 0' print ' mod(num_rgrid(2),nprocs_rgrid(2)) must be 0' print", "please check the nproc_ob' fail_exit() mb,nb = get_sl_blocking_factor(n, nprow, npcol) if (nb*npcol !=", "{},{},{}'.format(*inputmap['nproc_rgrid']) print ' process_allocation = \\'{}\\''.format(inputmap['process_allocation']) print '/' print '&system' print ' nstate", "' please check the nproc_ob' fail_exit() mb,nb = get_sl_blocking_factor(n, nprow, npcol) if (nb*npcol", "for j in range(0,17): if t % 3 == 0: t = t", "list_prod(inputmap['nproc_rgrid']) * inputmap['nproc_ob'] * inputmap['nproc_k'] != nprocs_global: print '[INFO]' print 'product(nproc_rgrid) * nproc_ob", "* inputmap['nproc_k'] / nprocs_per_node sy = list_prod(inputmap['nproc_rgrid']) print '' print '# 
=============================================== #'", "def to_boolean(s): if s.lower() == '\\'y\\'': return True elif s.lower() == '\\'n\\'': return", "{}'.format(inputmap['num_rgrid']) nnodes = int(sys.argv[2]) nprocs_per_node = int(sys.argv[3]) nprocs_global = nnodes * nprocs_per_node if", "inputmap['nproc_rgrid']): print '[FFTE]' print 'num_rgrid and nproc_rgrid are available to use FFTE.' if", "(row,col) = ({},{})'.format(nprow,npcol) print 'blocking factor (row,col) = ({},{})'.format(mb,nb) print '[INFO]' print 'wave", "re.compile(r'\\s*al\\s*=\\s*\\d+') dl_pattern = re.compile(r'\\s*dl\\s*=\\s*\\d+') num_rgrid_pattern = re.compile(r'\\s*num_rgrid\\s*=\\s*\\d+') yn_ffte_pattern = re.compile(r'\\s*yn_ffte\\s*=\\s*') yn_scalapack_pattern= re.compile(r'\\s*yn_scalapack\\s*=\\s*') proc_alloc_pattern", "nproc_rgrid = get_nproc_rgrid(nprocs_global, nprocs_per_node, num_rgrid, inputmap['yn_ffte']) nproc_ob = nprocs_global / list_prod(nproc_rgrid) nproc_k =", "the following inputlists.' print '' print_inputlists(inputmap) print '' print '# =============================================== #' print", "suitable to run the application.' 
print '' print 'NXxNY = {}x{}'.format(sx,sy) print 'NX", "in dims] print '[INFO] num_rgrid constructed = {}'.format(inputmap['num_rgrid']) nnodes = int(sys.argv[2]) nprocs_per_node =", "x in number_pattern.findall(s): dims.append(int(x)) inputmap['nproc_rgrid'] = dims elif al_pattern.match(s): dims = [] for", "0 and z2 == 0 def check_ffte_condition_prime_factors(num_rgrid): for i in range(0,3): t =", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "elif al_pattern.match(s): dims = [] for x in float_pattern.findall(s): dims.append(float(x.replace('d','e'))) inputmap['al'] = dims", "print '&system' print ' nstate = {}'.format(inputmap['nstate']) print '/' print '&rgrid' print '", "in range(0,26): if t % 2 == 0: t = t / 2", "t = t / 5 if t != 1: return False return True", "= int(math.sqrt(float(nprocs))) npcol = npcol + (npcol % 2) for ii in range(0,100):", "ii, nz]): if nprocs % list_prod([nprocs_per_node, ii, nz]) == 0: ny = ii", "'wave function size per MPI process' for i in range(0,3): print 'num_rgrid[{}] /", "'If you use mesh-torus network type system,' print 'probably, the following node shape", "== '\\'y\\'': return True elif s.lower() == '\\'n\\'': return False else: return False", "+ (ny % 2) for ii in range(ny,1,-1): if check_ffte_condition_gridsize(num_rgrid, [nprocs_per_node, ii, nz]):", "= string_pattern.search(s).group().strip('\\'').lower() elif nstate_pattern.match(s): inputmap['nstate'] = int(number_pattern.search(s).group()) elif nproc_k_pattern.match(s): inputmap['nproc_k'] = int(number_pattern.search(s).group()) elif", "must be 0' print ' mod(num_rgrid(2),nprocs_rgrid(3)) must be 0' print ' mod(num_rgrid(3),nprocs_rgrid(3)) must", "of MPI procs = {}'.format(nprocs_global) print 'calculate nproc_k,ob and rgrid' # find nproc_.*", "divided.' 
nzy = nprocs / nprocs_per_node nz = int(math.sqrt(float(nzy))) nz = nz +", "get_sl_blocking_factor(n, nprow, npcol) if (nb*npcol != n): n = max(nb*npcol, n) n =", "== 0 and z2 == 0 def check_ffte_condition_prime_factors(num_rgrid): for i in range(0,3): t", "{}'.format(float(inputmap['nstate'])/inputmap['nproc_ob']) sx = inputmap['nproc_ob'] * inputmap['nproc_k'] / nprocs_per_node sy = list_prod(inputmap['nproc_rgrid']) print ''", "= inputmap['nproc_ob'] * inputmap['nproc_k'] / nprocs_per_node sy = list_prod(inputmap['nproc_rgrid']) print '' print '#" ]
[ "= 'hi' def g(*args, **kwargs): nonlocal x return f(*args, **kwargs) return g @inject_x", "**kwargs) return g @inject_x def foo(): return x def inject_x_attempt_2(f): def wrapper(f): x", "'hi' def wrapped(*args, **kwargs): nonlocal x return f(*args, **kwargs) return wrapped return wrapper(f)", "python3 # encoding: utf-8 def inject_x(f): x = 'hi' def g(*args, **kwargs): nonlocal", "return wrapped return wrapper(f) @inject_x_attempt_2 def bar(): return x if __name__ == '__main__':", "NameError: print('attempt 1 failed') try: print(bar()) except NameError: print('attempt 2 failed') try: print(baz())", "encoding: utf-8 def inject_x(f): x = 'hi' def g(*args, **kwargs): nonlocal x return", "def bar(): return x if __name__ == '__main__': try: print(foo()) except NameError: print('attempt", "inject_x(f): x = 'hi' def g(*args, **kwargs): nonlocal x return f(*args, **kwargs) return", "== '__main__': try: print(foo()) except NameError: print('attempt 1 failed') try: print(bar()) except NameError:", "return x if __name__ == '__main__': try: print(foo()) except NameError: print('attempt 1 failed')", "nonlocal x return f(*args, **kwargs) return g @inject_x def foo(): return x def", "return f(*args, **kwargs) return g @inject_x def foo(): return x def inject_x_attempt_2(f): def", "try: print(bar()) except NameError: print('attempt 2 failed') try: print(baz()) except NameError: print('attempt 3", "def foo(): return x def inject_x_attempt_2(f): def wrapper(f): x = 'hi' def wrapped(*args,", "x = 'hi' def g(*args, **kwargs): nonlocal x return f(*args, **kwargs) return g", "**kwargs) return wrapped return wrapper(f) @inject_x_attempt_2 def bar(): return x if __name__ ==", "return wrapper(f) @inject_x_attempt_2 def bar(): return x if __name__ == '__main__': try: print(foo())", "f(*args, **kwargs) return wrapped return wrapper(f) @inject_x_attempt_2 def bar(): return x if __name__", "utf-8 def inject_x(f): x = 'hi' def g(*args, **kwargs): nonlocal x return f(*args,", "g @inject_x def 
foo(): return x def inject_x_attempt_2(f): def wrapper(f): x = 'hi'", "x = 'hi' def wrapped(*args, **kwargs): nonlocal x return f(*args, **kwargs) return wrapped", "print(foo()) except NameError: print('attempt 1 failed') try: print(bar()) except NameError: print('attempt 2 failed')", "nonlocal x return f(*args, **kwargs) return wrapped return wrapper(f) @inject_x_attempt_2 def bar(): return", "__name__ == '__main__': try: print(foo()) except NameError: print('attempt 1 failed') try: print(bar()) except", "except NameError: print('attempt 1 failed') try: print(bar()) except NameError: print('attempt 2 failed') try:", "= 'hi' def wrapped(*args, **kwargs): nonlocal x return f(*args, **kwargs) return wrapped return", "f(*args, **kwargs) return g @inject_x def foo(): return x def inject_x_attempt_2(f): def wrapper(f):", "#!/usr/bin/env python3 # encoding: utf-8 def inject_x(f): x = 'hi' def g(*args, **kwargs):", "g(*args, **kwargs): nonlocal x return f(*args, **kwargs) return g @inject_x def foo(): return", "def wrapper(f): x = 'hi' def wrapped(*args, **kwargs): nonlocal x return f(*args, **kwargs)", "failed') try: print(bar()) except NameError: print('attempt 2 failed') try: print(baz()) except NameError: print('attempt", "'hi' def g(*args, **kwargs): nonlocal x return f(*args, **kwargs) return g @inject_x def", "**kwargs): nonlocal x return f(*args, **kwargs) return g @inject_x def foo(): return x", "wrapped(*args, **kwargs): nonlocal x return f(*args, **kwargs) return wrapped return wrapper(f) @inject_x_attempt_2 def", "'__main__': try: print(foo()) except NameError: print('attempt 1 failed') try: print(bar()) except NameError: print('attempt", "return g @inject_x def foo(): return x def inject_x_attempt_2(f): def wrapper(f): x =", "@inject_x def foo(): return x def inject_x_attempt_2(f): def wrapper(f): x = 'hi' def", "@inject_x_attempt_2 def bar(): return x if __name__ == '__main__': try: print(foo()) except NameError:", "return f(*args, **kwargs) return wrapped 
return wrapper(f) @inject_x_attempt_2 def bar(): return x if", "def inject_x_attempt_2(f): def wrapper(f): x = 'hi' def wrapped(*args, **kwargs): nonlocal x return", "except NameError: print('attempt 2 failed') try: print(baz()) except NameError: print('attempt 3 failed') print(x)", "x def inject_x_attempt_2(f): def wrapper(f): x = 'hi' def wrapped(*args, **kwargs): nonlocal x", "inject_x_attempt_2(f): def wrapper(f): x = 'hi' def wrapped(*args, **kwargs): nonlocal x return f(*args,", "return x def inject_x_attempt_2(f): def wrapper(f): x = 'hi' def wrapped(*args, **kwargs): nonlocal", "x if __name__ == '__main__': try: print(foo()) except NameError: print('attempt 1 failed') try:", "def wrapped(*args, **kwargs): nonlocal x return f(*args, **kwargs) return wrapped return wrapper(f) @inject_x_attempt_2", "# encoding: utf-8 def inject_x(f): x = 'hi' def g(*args, **kwargs): nonlocal x", "foo(): return x def inject_x_attempt_2(f): def wrapper(f): x = 'hi' def wrapped(*args, **kwargs):", "**kwargs): nonlocal x return f(*args, **kwargs) return wrapped return wrapper(f) @inject_x_attempt_2 def bar():", "def g(*args, **kwargs): nonlocal x return f(*args, **kwargs) return g @inject_x def foo():", "print(bar()) except NameError: print('attempt 2 failed') try: print(baz()) except NameError: print('attempt 3 failed')", "wrapper(f) @inject_x_attempt_2 def bar(): return x if __name__ == '__main__': try: print(foo()) except", "wrapped return wrapper(f) @inject_x_attempt_2 def bar(): return x if __name__ == '__main__': try:", "wrapper(f): x = 'hi' def wrapped(*args, **kwargs): nonlocal x return f(*args, **kwargs) return", "x return f(*args, **kwargs) return g @inject_x def foo(): return x def inject_x_attempt_2(f):", "bar(): return x if __name__ == '__main__': try: print(foo()) except NameError: print('attempt 1", "try: print(foo()) except NameError: print('attempt 1 failed') try: print(bar()) except NameError: print('attempt 2", "1 failed') try: print(bar()) except NameError: 
print('attempt 2 failed') try: print(baz()) except NameError:", "x return f(*args, **kwargs) return wrapped return wrapper(f) @inject_x_attempt_2 def bar(): return x", "print('attempt 1 failed') try: print(bar()) except NameError: print('attempt 2 failed') try: print(baz()) except", "if __name__ == '__main__': try: print(foo()) except NameError: print('attempt 1 failed') try: print(bar())", "def inject_x(f): x = 'hi' def g(*args, **kwargs): nonlocal x return f(*args, **kwargs)" ]
[ "libs import numpy as np import xarray as xr # custom libs from", "TODO: small differences with ML at nlogl_1-nlogl_2 = 1.92 gp_pars = FitGEV_KMA_Frechet( bmus,", "basic import import os import os.path as op import sys import time sys.path.insert(0,", "pc = PathControl() p_tests = pc.p_test_data p_test = op.join(p_tests, 'ClimateEmulator', 'gev_fit_kma_fretchet') # input", "custom libs from teslakit.project_site import PathControl from teslakit.extremes import FitGEV_KMA_Frechet # -------------------------------------- #", "op import sys import time sys.path.insert(0, op.join(op.dirname(__file__),'..','..')) # python libs import numpy as", "np import xarray as xr # custom libs from teslakit.project_site import PathControl from", "# python libs import numpy as np import xarray as xr # custom", "import xarray as xr # custom libs from teslakit.project_site import PathControl from teslakit.extremes", "teslakit.project_site import PathControl from teslakit.extremes import FitGEV_KMA_Frechet # -------------------------------------- # Test data storage", "= npzf['arr_1'] var_wvs = npzf['arr_2'] print(bmus) print(n_clusters) print(var_wvs) print() # TODO: small differences", "python # -*- coding: utf-8 -*- # basic import import os import os.path", "Test data storage pc = PathControl() p_tests = pc.p_test_data p_test = op.join(p_tests, 'ClimateEmulator',", "input p_npz = op.join(p_test, 'swell_1_Hs.npz') # -------------------------------------- # Load data npzf = np.load(p_npz)", "= np.load(p_npz) bmus = npzf['arr_0'] n_clusters = npzf['arr_1'] var_wvs = npzf['arr_2'] print(bmus) print(n_clusters)", "-*- coding: utf-8 -*- # basic import import os import os.path as op", "# input p_npz = op.join(p_test, 'swell_1_Hs.npz') # -------------------------------------- # Load data npzf =", "import PathControl from teslakit.extremes import FitGEV_KMA_Frechet # -------------------------------------- # Test data storage pc", "= op.join(p_test, 'swell_1_Hs.npz') # 
-------------------------------------- # Load data npzf = np.load(p_npz) bmus =", "teslakit.extremes import FitGEV_KMA_Frechet # -------------------------------------- # Test data storage pc = PathControl() p_tests", "var_wvs = npzf['arr_2'] print(bmus) print(n_clusters) print(var_wvs) print() # TODO: small differences with ML", "op.join(op.dirname(__file__),'..','..')) # python libs import numpy as np import xarray as xr #", "PathControl() p_tests = pc.p_test_data p_test = op.join(p_tests, 'ClimateEmulator', 'gev_fit_kma_fretchet') # input p_npz =", "os import os.path as op import sys import time sys.path.insert(0, op.join(op.dirname(__file__),'..','..')) # python", "-------------------------------------- # Test data storage pc = PathControl() p_tests = pc.p_test_data p_test =", "'gev_fit_kma_fretchet') # input p_npz = op.join(p_test, 'swell_1_Hs.npz') # -------------------------------------- # Load data npzf", "print(var_wvs) print() # TODO: small differences with ML at nlogl_1-nlogl_2 = 1.92 gp_pars", "from teslakit.project_site import PathControl from teslakit.extremes import FitGEV_KMA_Frechet # -------------------------------------- # Test data", "# Test data storage pc = PathControl() p_tests = pc.p_test_data p_test = op.join(p_tests,", "'ClimateEmulator', 'gev_fit_kma_fretchet') # input p_npz = op.join(p_test, 'swell_1_Hs.npz') # -------------------------------------- # Load data", "small differences with ML at nlogl_1-nlogl_2 = 1.92 gp_pars = FitGEV_KMA_Frechet( bmus, n_clusters,", "PathControl from teslakit.extremes import FitGEV_KMA_Frechet # -------------------------------------- # Test data storage pc =", "sys import time sys.path.insert(0, op.join(op.dirname(__file__),'..','..')) # python libs import numpy as np import", "npzf['arr_1'] var_wvs = npzf['arr_2'] print(bmus) print(n_clusters) print(var_wvs) print() # TODO: small differences with", "op.join(p_tests, 'ClimateEmulator', 'gev_fit_kma_fretchet') # input p_npz = op.join(p_test, 
'swell_1_Hs.npz') # -------------------------------------- # Load", "os.path as op import sys import time sys.path.insert(0, op.join(op.dirname(__file__),'..','..')) # python libs import", "utf-8 -*- # basic import import os import os.path as op import sys", "data npzf = np.load(p_npz) bmus = npzf['arr_0'] n_clusters = npzf['arr_1'] var_wvs = npzf['arr_2']", "numpy as np import xarray as xr # custom libs from teslakit.project_site import", "import FitGEV_KMA_Frechet # -------------------------------------- # Test data storage pc = PathControl() p_tests =", "p_tests = pc.p_test_data p_test = op.join(p_tests, 'ClimateEmulator', 'gev_fit_kma_fretchet') # input p_npz = op.join(p_test,", "npzf['arr_0'] n_clusters = npzf['arr_1'] var_wvs = npzf['arr_2'] print(bmus) print(n_clusters) print(var_wvs) print() # TODO:", "sys.path.insert(0, op.join(op.dirname(__file__),'..','..')) # python libs import numpy as np import xarray as xr", "= npzf['arr_0'] n_clusters = npzf['arr_1'] var_wvs = npzf['arr_2'] print(bmus) print(n_clusters) print(var_wvs) print() #", "# -*- coding: utf-8 -*- # basic import import os import os.path as", "import os import os.path as op import sys import time sys.path.insert(0, op.join(op.dirname(__file__),'..','..')) #", "bmus = npzf['arr_0'] n_clusters = npzf['arr_1'] var_wvs = npzf['arr_2'] print(bmus) print(n_clusters) print(var_wvs) print()", "= op.join(p_tests, 'ClimateEmulator', 'gev_fit_kma_fretchet') # input p_npz = op.join(p_test, 'swell_1_Hs.npz') # -------------------------------------- #", "FitGEV_KMA_Frechet # -------------------------------------- # Test data storage pc = PathControl() p_tests = pc.p_test_data", "# Load data npzf = np.load(p_npz) bmus = npzf['arr_0'] n_clusters = npzf['arr_1'] var_wvs", "npzf['arr_2'] print(bmus) print(n_clusters) print(var_wvs) print() # TODO: small differences with ML at nlogl_1-nlogl_2", "npzf = np.load(p_npz) bmus = npzf['arr_0'] n_clusters = npzf['arr_1'] var_wvs = npzf['arr_2'] print(bmus)", "# 
-------------------------------------- # Load data npzf = np.load(p_npz) bmus = npzf['arr_0'] n_clusters =", "print() # TODO: small differences with ML at nlogl_1-nlogl_2 = 1.92 gp_pars =", "= pc.p_test_data p_test = op.join(p_tests, 'ClimateEmulator', 'gev_fit_kma_fretchet') # input p_npz = op.join(p_test, 'swell_1_Hs.npz')", "#!/usr/bin/env python # -*- coding: utf-8 -*- # basic import import os import", "as xr # custom libs from teslakit.project_site import PathControl from teslakit.extremes import FitGEV_KMA_Frechet", "data storage pc = PathControl() p_tests = pc.p_test_data p_test = op.join(p_tests, 'ClimateEmulator', 'gev_fit_kma_fretchet')", "storage pc = PathControl() p_tests = pc.p_test_data p_test = op.join(p_tests, 'ClimateEmulator', 'gev_fit_kma_fretchet') #", "from teslakit.extremes import FitGEV_KMA_Frechet # -------------------------------------- # Test data storage pc = PathControl()", "# TODO: small differences with ML at nlogl_1-nlogl_2 = 1.92 gp_pars = FitGEV_KMA_Frechet(", "-*- # basic import import os import os.path as op import sys import", "# custom libs from teslakit.project_site import PathControl from teslakit.extremes import FitGEV_KMA_Frechet # --------------------------------------", "print(n_clusters) print(var_wvs) print() # TODO: small differences with ML at nlogl_1-nlogl_2 = 1.92", "coding: utf-8 -*- # basic import import os import os.path as op import", "time sys.path.insert(0, op.join(op.dirname(__file__),'..','..')) # python libs import numpy as np import xarray as", "n_clusters = npzf['arr_1'] var_wvs = npzf['arr_2'] print(bmus) print(n_clusters) print(var_wvs) print() # TODO: small", "as op import sys import time sys.path.insert(0, op.join(op.dirname(__file__),'..','..')) # python libs import numpy", "differences with ML at nlogl_1-nlogl_2 = 1.92 gp_pars = FitGEV_KMA_Frechet( bmus, n_clusters, var_wvs)", "pc.p_test_data p_test = op.join(p_tests, 'ClimateEmulator', 'gev_fit_kma_fretchet') # input p_npz = op.join(p_test, 
'swell_1_Hs.npz') #", "= PathControl() p_tests = pc.p_test_data p_test = op.join(p_tests, 'ClimateEmulator', 'gev_fit_kma_fretchet') # input p_npz", "libs from teslakit.project_site import PathControl from teslakit.extremes import FitGEV_KMA_Frechet # -------------------------------------- # Test", "xarray as xr # custom libs from teslakit.project_site import PathControl from teslakit.extremes import", "np.load(p_npz) bmus = npzf['arr_0'] n_clusters = npzf['arr_1'] var_wvs = npzf['arr_2'] print(bmus) print(n_clusters) print(var_wvs)", "Load data npzf = np.load(p_npz) bmus = npzf['arr_0'] n_clusters = npzf['arr_1'] var_wvs =", "import sys import time sys.path.insert(0, op.join(op.dirname(__file__),'..','..')) # python libs import numpy as np", "import numpy as np import xarray as xr # custom libs from teslakit.project_site", "-------------------------------------- # Load data npzf = np.load(p_npz) bmus = npzf['arr_0'] n_clusters = npzf['arr_1']", "print(bmus) print(n_clusters) print(var_wvs) print() # TODO: small differences with ML at nlogl_1-nlogl_2 =", "'swell_1_Hs.npz') # -------------------------------------- # Load data npzf = np.load(p_npz) bmus = npzf['arr_0'] n_clusters", "import os.path as op import sys import time sys.path.insert(0, op.join(op.dirname(__file__),'..','..')) # python libs", "# basic import import os import os.path as op import sys import time", "p_test = op.join(p_tests, 'ClimateEmulator', 'gev_fit_kma_fretchet') # input p_npz = op.join(p_test, 'swell_1_Hs.npz') # --------------------------------------", "# -------------------------------------- # Test data storage pc = PathControl() p_tests = pc.p_test_data p_test", "p_npz = op.join(p_test, 'swell_1_Hs.npz') # -------------------------------------- # Load data npzf = np.load(p_npz) bmus", "import time sys.path.insert(0, op.join(op.dirname(__file__),'..','..')) # python libs import numpy as np import xarray", "op.join(p_test, 'swell_1_Hs.npz') # -------------------------------------- # Load 
data npzf = np.load(p_npz) bmus = npzf['arr_0']", "= npzf['arr_2'] print(bmus) print(n_clusters) print(var_wvs) print() # TODO: small differences with ML at", "xr # custom libs from teslakit.project_site import PathControl from teslakit.extremes import FitGEV_KMA_Frechet #", "as np import xarray as xr # custom libs from teslakit.project_site import PathControl", "import import os import os.path as op import sys import time sys.path.insert(0, op.join(op.dirname(__file__),'..','..'))", "python libs import numpy as np import xarray as xr # custom libs", "with ML at nlogl_1-nlogl_2 = 1.92 gp_pars = FitGEV_KMA_Frechet( bmus, n_clusters, var_wvs) print(gp_pars)" ]
[ "20 def preprocess_image(image_path, is_train=True): \"\"\" 1. read image 2. resize image to 256", "build_vocab(ingredients): \"\"\" \"\"\" vocab = {STOP_TOKEN: 0, PAD_TOKEN: 1, UNK_TOKEN: 2} tokens =", "param is_train: True if processing images for training (sample random image patch) or", "= i + 3 return vocab def get_data(classes_path, ingredients_path, images, train_image_path, test_image_path): \"\"\"", "- 2)] padded_ing = [START_TOKEN] + padded_ing + [STOP_TOKEN] + [PAD_TOKEN] * (", "(sample random image patch) or False if processing for testing (sample central image", "skimage import io, transform, img_as_float32 import random START_TOKEN = \"*START*\" # no index", "pth in test_image_paths: test_ingredient_list.append(ingredients_dict[str].split(\",\")) vocab = build_vocab(train_ingredient_list + test_ingredient_list) padded_train_ingredients = np.array(pad_ingredients(train_ingredient_list)) padded_test_ingredients", "import random START_TOKEN = \"*START*\" # no index as start token is not", "else: start_r = (newshape[0] - 224) // 2 start_c = (newshape[1] - 224)", "// 2 start_c = (newshape[1] - 224) // 2 image = image[start_r:start_r+224, start_c:start_c+224,", "vocab = build_vocab(train_ingredient_list + test_ingredient_list) padded_train_ingredients = np.array(pad_ingredients(train_ingredient_list)) padded_test_ingredients = np.array(pad_ingredients(test_ingredient_list)) train_ingredients =", "os.path.join(r, file) if pth in train_image_paths: train_ingredient_list.append(ingredients_dict[str].split(\",\")) elif pth in test_image_paths: test_ingredient_list.append(ingredients_dict[str].split(\",\")) vocab", "i in ingredients: tokens.extend(i) all_words = sorted(list(set(tokens))) for i, word in enumerate(all_words): vocab[word]", "word in sentence] for sentence in sentences]) def build_vocab(ingredients): \"\"\" \"\"\" vocab =", "os import numpy as np from PIL import Image from skimage import io,", "0 start_c = 0 if is_train: start_r = 
random.randint(0, newshape[0]-224) start_c = random.randint(0,", "def preprocess_image(image_path, is_train=True): \"\"\" 1. read image 2. resize image to 256 *", "padded_train_ingredients = np.array(pad_ingredients(train_ingredient_list)) padded_test_ingredients = np.array(pad_ingredients(test_ingredient_list)) train_ingredients = convert_to_id(vocab, padded_train_ingredients) test_ingredients = convert_to_id(vocab,", "size (len(image_paths), 224, 224, 3) containing the preprocessed images \"\"\" return np.stack([preprocess_image(path, is_train)", "2 start_c = (newshape[1] - 224) // 2 image = image[start_r:start_r+224, start_c:start_c+224, :]", "paper doesn't) 5. return processed image \"\"\" image = io.imread(image_path) h,w,c = image.shape", "(256,256,3) if h > w: newshape = (int((256.0 / float(w)) * float(h)), 256,", "(256, int((256.0 / float(h)) * float(w)), c) image = transform.resize(image, newshape, anti_aliasing=True) start_r", "= [] for i in ingredients: tokens.extend(i) all_words = sorted(list(set(tokens))) for i, word", "padded_ing = line[:(WINDOW_SIZE - 2)] padded_ing = [START_TOKEN] + padded_ing + [STOP_TOKEN] +", "= np.array(pad_ingredients(train_ingredient_list)) padded_test_ingredients = np.array(pad_ingredients(test_ingredient_list)) train_ingredients = convert_to_id(vocab, padded_train_ingredients) test_ingredients = convert_to_id(vocab, padded_test_ingredients)", "train_file = open(train_image_path, \"r\") test_file = open(test_image_path, \"r\") for line in train_file: splitline", "processed image \"\"\" image = io.imread(image_path) h,w,c = image.shape newshape = (256,256,3) if", "[STOP_TOKEN] + [PAD_TOKEN] * ( WINDOW_SIZE - len(padded_ing) - 1) padded_ingredients_list.append(padded_ing) return padded_ingredients_list", "image locations (such as a length batch_size slice of a larger list) param", "= \"*STOP*\" #Index: 0 PAD_TOKEN = \"*<PASSWORD>*\" #Index: 1 UNK_TOKEN = \"*UNK*\" #Index:", "str[:-1] if str in ingredients_dict: pth = 
os.path.join(r, file) if pth in train_image_paths:", "splitline[1])) for line in test_file: splitline = line.rstrip().split('/') test_image_paths.append(os.path.join(images, splitline[0], splitline[1])) for r,", "2} tokens = [] for i in ingredients: tokens.extend(i) all_words = sorted(list(set(tokens))) for", "[START_TOKEN] + padded_ing + [STOP_TOKEN] + [PAD_TOKEN] * ( WINDOW_SIZE - len(padded_ing) -", "of a larger list) param is_train: True if processing images for training (sample", "for i, word in enumerate(all_words): vocab[word] = i + 3 return vocab def", "if str in ingredients_dict: pth = os.path.join(r, file) if pth in train_image_paths: train_ingredient_list.append(ingredients_dict[str].split(\",\"))", "if is_train: start_r = random.randint(0, newshape[0]-224) start_c = random.randint(0, newshape[1]-224) else: start_r =", "random START_TOKEN = \"*START*\" # no index as start token is not added", "newshape = (256, int((256.0 / float(h)) * float(w)), c) image = transform.resize(image, newshape,", "= os.path.join(r, file) if pth in train_image_paths: train_ingredient_list.append(ingredients_dict[str].split(\",\")) elif pth in test_image_paths: test_ingredient_list.append(ingredients_dict[str].split(\",\"))", "train_image_paths: train_ingredient_list.append(ingredients_dict[str].split(\",\")) elif pth in test_image_paths: test_ingredient_list.append(ingredients_dict[str].split(\",\")) vocab = build_vocab(train_ingredient_list + test_ingredient_list) padded_train_ingredients", "[] for line in ingredients_file: ingredients.append(line.rstrip().lower()) ingredients_dict = {} for i in range(len(ingredients)):", "image patch) or False if processing for testing (sample central image patch) return:", "1 UNK_TOKEN = \"*UNK*\" #Index: 2 WINDOW_SIZE = 20 def preprocess_image(image_path, is_train=True): \"\"\"", "tokens.extend(i) all_words = sorted(list(set(tokens))) for i, word in enumerate(all_words): vocab[word] = i +", "{} for i in range(len(ingredients)): 
ingredients_dict[classes[i]] = ingredients[i] train_ingredient_list = [] test_ingredient_list =", "ingredient_list: line = list(set(line)) padded_ing = line[:(WINDOW_SIZE - 2)] padded_ing = [START_TOKEN] +", "splitline[0], splitline[1])) for r, d, f in os.walk(images): for file in f: name", "start_c = random.randint(0, newshape[1]-224) else: start_r = (newshape[0] - 224) // 2 start_c", "[] for i in ingredients: tokens.extend(i) all_words = sorted(list(set(tokens))) for i, word in", "\"r\") for line in train_file: splitline = line.rstrip().split('/') train_image_paths.append(os.path.join(images, splitline[0], splitline[1])) for line", "for line in class_file: classes.append(line.rstrip().lower()) ingredients_file = open(ingredients_path, \"r\") ingredients = [] for", "import sys, os import numpy as np from PIL import Image from skimage", "line in ingredient_list: line = list(set(line)) padded_ing = line[:(WINDOW_SIZE - 2)] padded_ing =", "/ float(h)) * float(w)), c) image = transform.resize(image, newshape, anti_aliasing=True) start_r = 0", "(newshape[1] - 224) // 2 image = image[start_r:start_r+224, start_c:start_c+224, :] return image def", "preprocess_image(image_path, is_train=True): \"\"\" 1. read image 2. resize image to 256 * 256", "is not added to vocabulary as we don't want to predict start STOP_TOKEN", "splitline = line.rstrip().split('/') test_image_paths.append(os.path.join(images, splitline[0], splitline[1])) for r, d, f in os.walk(images): for", "since original paper doesn't) 5. 
return processed image \"\"\" image = io.imread(image_path) h,w,c", "\"\" for word in name: str += word + \" \" str =", "splitline[1])) for r, d, f in os.walk(images): for file in f: name =", "don't want to predict start STOP_TOKEN = \"*STOP*\" #Index: 0 PAD_TOKEN = \"*<PASSWORD>*\"", "STOP_TOKEN = \"*STOP*\" #Index: 0 PAD_TOKEN = \"*<PASSWORD>*\" #Index: 1 UNK_TOKEN = \"*UNK*\"", "open(test_image_path, \"r\") for line in train_file: splitline = line.rstrip().split('/') train_image_paths.append(os.path.join(images, splitline[0], splitline[1])) for", "vocab[word] = i + 3 return vocab def get_data(classes_path, ingredients_path, images, train_image_path, test_image_path):", "float(w)), c) image = transform.resize(image, newshape, anti_aliasing=True) start_r = 0 start_c = 0", "processing for testing (sample central image patch) return: a numpy array of size", "(sample central image patch) return: a numpy array of size (len(image_paths), 224, 224,", "vocab[UNK_TOKEN] for word in sentence] for sentence in sentences]) def build_vocab(ingredients): \"\"\" \"\"\"", "train_ingredient_list = [] test_ingredient_list = [] train_image_paths = [] test_image_paths = [] train_file", "newshape = (int((256.0 / float(w)) * float(h)), 256, c) elif h < w:", "256 * 256 * 3 3. randomly sample a 224 * 224 *3", "2)] padded_ing = [START_TOKEN] + padded_ing + [STOP_TOKEN] + [PAD_TOKEN] * ( WINDOW_SIZE", "[] train_image_paths = [] test_image_paths = [] train_file = open(train_image_path, \"r\") test_file =", "0, PAD_TOKEN: 1, UNK_TOKEN: 2} tokens = [] for i in ingredients: tokens.extend(i)", "- 224) // 2 start_c = (newshape[1] - 224) // 2 image =", "* 224 *3 patch of the image 4. normalize image intensity? 
(NOTE: not", "to image locations (such as a length batch_size slice of a larger list)", "a numpy array of size (len(image_paths), 224, 224, 3) containing the preprocessed images", "train_image_path, test_image_path): \"\"\" \"\"\" class_file = open(classes_path, \"r\") classes = [] for line", "ingredients_dict: pth = os.path.join(r, file) if pth in train_image_paths: train_ingredient_list.append(ingredients_dict[str].split(\",\")) elif pth in", "line = list(set(line)) padded_ing = line[:(WINDOW_SIZE - 2)] padded_ing = [START_TOKEN] + padded_ing", "= [] for line in class_file: classes.append(line.rstrip().lower()) ingredients_file = open(ingredients_path, \"r\") ingredients =", "[] test_image_paths = [] train_file = open(train_image_path, \"r\") test_file = open(test_image_path, \"r\") for", "= file.split(\"_\")[1:-1] str = \"\" for word in name: str += word +", "a list of paths to image locations (such as a length batch_size slice", "h > w: newshape = (int((256.0 / float(w)) * float(h)), 256, c) elif", "\"*UNK*\" #Index: 2 WINDOW_SIZE = 20 def preprocess_image(image_path, is_train=True): \"\"\" 1. 
read image", "start_c = 0 if is_train: start_r = random.randint(0, newshape[0]-224) start_c = random.randint(0, newshape[1]-224)", "len(padded_ing) - 1) padded_ingredients_list.append(padded_ing) return padded_ingredients_list def convert_to_id(vocab, sentences): \"\"\" \"\"\" return np.stack([[vocab[word]", "test_file: splitline = line.rstrip().split('/') test_image_paths.append(os.path.join(images, splitline[0], splitline[1])) for r, d, f in os.walk(images):", "np.stack([preprocess_image(path, is_train) for path in image_paths], axis=0) def pad_ingredients(ingredient_list): \"\"\" \"\"\" padded_ingredients_list =", "test_image_paths.append(os.path.join(images, splitline[0], splitline[1])) for r, d, f in os.walk(images): for file in f:", "in train_image_paths: train_ingredient_list.append(ingredients_dict[str].split(\",\")) elif pth in test_image_paths: test_ingredient_list.append(ingredients_dict[str].split(\",\")) vocab = build_vocab(train_ingredient_list + test_ingredient_list)", "test_image_paths: test_ingredient_list.append(ingredients_dict[str].split(\",\")) vocab = build_vocab(train_ingredient_list + test_ingredient_list) padded_train_ingredients = np.array(pad_ingredients(train_ingredient_list)) padded_test_ingredients = np.array(pad_ingredients(test_ingredient_list))", "START_TOKEN = \"*START*\" # no index as start token is not added to", "vocab def get_data(classes_path, ingredients_path, images, train_image_path, test_image_path): \"\"\" \"\"\" class_file = open(classes_path, \"r\")", "classes = [] for line in class_file: classes.append(line.rstrip().lower()) ingredients_file = open(ingredients_path, \"r\") ingredients", "line.rstrip().split('/') test_image_paths.append(os.path.join(images, splitline[0], splitline[1])) for r, d, f in os.walk(images): for file in", "= str[:-1] if str in ingredients_dict: pth = os.path.join(r, file) if pth in", "images \"\"\" return np.stack([preprocess_image(path, is_train) for path in image_paths], axis=0) def 
pad_ingredients(ingredient_list): \"\"\"", "#Index: 0 PAD_TOKEN = \"*<PASSWORD>*\" #Index: 1 UNK_TOKEN = \"*UNK*\" #Index: 2 WINDOW_SIZE", "sample a 224 * 224 *3 patch of the image 4. normalize image", "return image def get_image_batch(image_paths, is_train=True): \"\"\" param image_paths: a list of paths to", "* 256 * 3 3. randomly sample a 224 * 224 *3 patch", "file.split(\"_\")[1:-1] str = \"\" for word in name: str += word + \"", "\"\"\" \"\"\" vocab = {STOP_TOKEN: 0, PAD_TOKEN: 1, UNK_TOKEN: 2} tokens = []", "image = io.imread(image_path) h,w,c = image.shape newshape = (256,256,3) if h > w:", "str += word + \" \" str = str[:-1] if str in ingredients_dict:", "word + \" \" str = str[:-1] if str in ingredients_dict: pth =", "= build_vocab(train_ingredient_list + test_ingredient_list) padded_train_ingredients = np.array(pad_ingredients(train_ingredient_list)) padded_test_ingredients = np.array(pad_ingredients(test_ingredient_list)) train_ingredients = convert_to_id(vocab,", "f in os.walk(images): for file in f: name = file.split(\"_\")[1:-1] str = \"\"", "d, f in os.walk(images): for file in f: name = file.split(\"_\")[1:-1] str =", "(NOTE: not currently doing this, which is fine since original paper doesn't) 5.", "= np.array(pad_ingredients(test_ingredient_list)) train_ingredients = convert_to_id(vocab, padded_train_ingredients) test_ingredients = convert_to_id(vocab, padded_test_ingredients) return train_image_paths, train_ingredients,", "as a length batch_size slice of a larger list) param is_train: True if", "= random.randint(0, newshape[1]-224) else: start_r = (newshape[0] - 224) // 2 start_c =", "which is fine since original paper doesn't) 5. 
return processed image \"\"\" image", "in range(len(ingredients)): ingredients_dict[classes[i]] = ingredients[i] train_ingredient_list = [] test_ingredient_list = [] train_image_paths =", "np.stack([[vocab[word] if word in vocab else vocab[UNK_TOKEN] for word in sentence] for sentence", ":] return image def get_image_batch(image_paths, is_train=True): \"\"\" param image_paths: a list of paths", "i + 3 return vocab def get_data(classes_path, ingredients_path, images, train_image_path, test_image_path): \"\"\" \"\"\"", "is_train=True): \"\"\" 1. read image 2. resize image to 256 * 256 *", "def convert_to_id(vocab, sentences): \"\"\" \"\"\" return np.stack([[vocab[word] if word in vocab else vocab[UNK_TOKEN]", "img_as_float32 import random START_TOKEN = \"*START*\" # no index as start token is", "= open(classes_path, \"r\") classes = [] for line in class_file: classes.append(line.rstrip().lower()) ingredients_file =", "for word in sentence] for sentence in sentences]) def build_vocab(ingredients): \"\"\" \"\"\" vocab", "* ( WINDOW_SIZE - len(padded_ing) - 1) padded_ingredients_list.append(padded_ing) return padded_ingredients_list def convert_to_id(vocab, sentences):", "= 20 def preprocess_image(image_path, is_train=True): \"\"\" 1. read image 2. resize image to", "c) elif h < w: newshape = (256, int((256.0 / float(h)) * float(w)),", "patch of the image 4. normalize image intensity? (NOTE: not currently doing this,", "* float(w)), c) image = transform.resize(image, newshape, anti_aliasing=True) start_r = 0 start_c =", "normalize image intensity? (NOTE: not currently doing this, which is fine since original", "the image 4. normalize image intensity? 
(NOTE: not currently doing this, which is", "return np.stack([preprocess_image(path, is_train) for path in image_paths], axis=0) def pad_ingredients(ingredient_list): \"\"\" \"\"\" padded_ingredients_list", "in name: str += word + \" \" str = str[:-1] if str", "\"*STOP*\" #Index: 0 PAD_TOKEN = \"*<PASSWORD>*\" #Index: 1 UNK_TOKEN = \"*UNK*\" #Index: 2", "224 * 224 *3 patch of the image 4. normalize image intensity? (NOTE:", "\"r\") classes = [] for line in class_file: classes.append(line.rstrip().lower()) ingredients_file = open(ingredients_path, \"r\")", "currently doing this, which is fine since original paper doesn't) 5. return processed", "\" str = str[:-1] if str in ingredients_dict: pth = os.path.join(r, file) if", "image patch) return: a numpy array of size (len(image_paths), 224, 224, 3) containing", "or False if processing for testing (sample central image patch) return: a numpy", "newshape[0]-224) start_c = random.randint(0, newshape[1]-224) else: start_r = (newshape[0] - 224) // 2", "test_ingredient_list.append(ingredients_dict[str].split(\",\")) vocab = build_vocab(train_ingredient_list + test_ingredient_list) padded_train_ingredients = np.array(pad_ingredients(train_ingredient_list)) padded_test_ingredients = np.array(pad_ingredients(test_ingredient_list)) train_ingredients", "image_paths: a list of paths to image locations (such as a length batch_size", "in ingredients_dict: pth = os.path.join(r, file) if pth in train_image_paths: train_ingredient_list.append(ingredients_dict[str].split(\",\")) elif pth", "elif h < w: newshape = (256, int((256.0 / float(h)) * float(w)), c)", "for i in ingredients: tokens.extend(i) all_words = sorted(list(set(tokens))) for i, word in enumerate(all_words):", "from PIL import Image from skimage import io, transform, img_as_float32 import random START_TOKEN", "train_image_paths.append(os.path.join(images, splitline[0], splitline[1])) for line in test_file: splitline = line.rstrip().split('/') 
test_image_paths.append(os.path.join(images, splitline[0], splitline[1]))", "image def get_image_batch(image_paths, is_train=True): \"\"\" param image_paths: a list of paths to image", "in ingredients: tokens.extend(i) all_words = sorted(list(set(tokens))) for i, word in enumerate(all_words): vocab[word] =", "a 224 * 224 *3 patch of the image 4. normalize image intensity?", "def pad_ingredients(ingredient_list): \"\"\" \"\"\" padded_ingredients_list = [] for line in ingredient_list: line =", "ingredients_file = open(ingredients_path, \"r\") ingredients = [] for line in ingredients_file: ingredients.append(line.rstrip().lower()) ingredients_dict", "image.shape newshape = (256,256,3) if h > w: newshape = (int((256.0 / float(w))", "float(w)) * float(h)), 256, c) elif h < w: newshape = (256, int((256.0", "= (256, int((256.0 / float(h)) * float(w)), c) image = transform.resize(image, newshape, anti_aliasing=True)", "name = file.split(\"_\")[1:-1] str = \"\" for word in name: str += word", "f: name = file.split(\"_\")[1:-1] str = \"\" for word in name: str +=", "str = \"\" for word in name: str += word + \" \"", "for file in f: name = file.split(\"_\")[1:-1] str = \"\" for word in", "resize image to 256 * 256 * 3 3. 
randomly sample a 224", "in sentence] for sentence in sentences]) def build_vocab(ingredients): \"\"\" \"\"\" vocab = {STOP_TOKEN:", "image = transform.resize(image, newshape, anti_aliasing=True) start_r = 0 start_c = 0 if is_train:", "the preprocessed images \"\"\" return np.stack([preprocess_image(path, is_train) for path in image_paths], axis=0) def", "transform.resize(image, newshape, anti_aliasing=True) start_r = 0 start_c = 0 if is_train: start_r =", "= 0 start_c = 0 if is_train: start_r = random.randint(0, newshape[0]-224) start_c =", "= transform.resize(image, newshape, anti_aliasing=True) start_r = 0 start_c = 0 if is_train: start_r", "[] for line in class_file: classes.append(line.rstrip().lower()) ingredients_file = open(ingredients_path, \"r\") ingredients = []", "= open(ingredients_path, \"r\") ingredients = [] for line in ingredients_file: ingredients.append(line.rstrip().lower()) ingredients_dict =", "for testing (sample central image patch) return: a numpy array of size (len(image_paths),", "random.randint(0, newshape[1]-224) else: start_r = (newshape[0] - 224) // 2 start_c = (newshape[1]", "< w: newshape = (256, int((256.0 / float(h)) * float(w)), c) image =", "for r, d, f in os.walk(images): for file in f: name = file.split(\"_\")[1:-1]", "tokens = [] for i in ingredients: tokens.extend(i) all_words = sorted(list(set(tokens))) for i,", "for line in ingredient_list: line = list(set(line)) padded_ing = line[:(WINDOW_SIZE - 2)] padded_ing", "ingredients_file: ingredients.append(line.rstrip().lower()) ingredients_dict = {} for i in range(len(ingredients)): ingredients_dict[classes[i]] = ingredients[i] train_ingredient_list", "sys, os import numpy as np from PIL import Image from skimage import", "h < w: newshape = (256, int((256.0 / float(h)) * float(w)), c) image", "this, which is fine since original paper doesn't) 5. 
return processed image \"\"\"", "preprocessed images \"\"\" return np.stack([preprocess_image(path, is_train) for path in image_paths], axis=0) def pad_ingredients(ingredient_list):", "padded_ing = [START_TOKEN] + padded_ing + [STOP_TOKEN] + [PAD_TOKEN] * ( WINDOW_SIZE -", "word in name: str += word + \" \" str = str[:-1] if", "list(set(line)) padded_ing = line[:(WINDOW_SIZE - 2)] padded_ing = [START_TOKEN] + padded_ing + [STOP_TOKEN]", "= sorted(list(set(tokens))) for i, word in enumerate(all_words): vocab[word] = i + 3 return", "+ 3 return vocab def get_data(classes_path, ingredients_path, images, train_image_path, test_image_path): \"\"\" \"\"\" class_file", "\"*<PASSWORD>*\" #Index: 1 UNK_TOKEN = \"*UNK*\" #Index: 2 WINDOW_SIZE = 20 def preprocess_image(image_path,", "np from PIL import Image from skimage import io, transform, img_as_float32 import random", "4. normalize image intensity? (NOTE: not currently doing this, which is fine since", "is_train: start_r = random.randint(0, newshape[0]-224) start_c = random.randint(0, newshape[1]-224) else: start_r = (newshape[0]", "file in f: name = file.split(\"_\")[1:-1] str = \"\" for word in name:", "= \"*<PASSWORD>*\" #Index: 1 UNK_TOKEN = \"*UNK*\" #Index: 2 WINDOW_SIZE = 20 def", "= random.randint(0, newshape[0]-224) start_c = random.randint(0, newshape[1]-224) else: start_r = (newshape[0] - 224)", "\"\"\" param image_paths: a list of paths to image locations (such as a", "containing the preprocessed images \"\"\" return np.stack([preprocess_image(path, is_train) for path in image_paths], axis=0)", "image 2. resize image to 256 * 256 * 3 3. 
randomly sample", "test_ingredient_list) padded_train_ingredients = np.array(pad_ingredients(train_ingredient_list)) padded_test_ingredients = np.array(pad_ingredients(test_ingredient_list)) train_ingredients = convert_to_id(vocab, padded_train_ingredients) test_ingredients =", "= image.shape newshape = (256,256,3) if h > w: newshape = (int((256.0 /", "pth = os.path.join(r, file) if pth in train_image_paths: train_ingredient_list.append(ingredients_dict[str].split(\",\")) elif pth in test_image_paths:", "as np from PIL import Image from skimage import io, transform, img_as_float32 import", "token is not added to vocabulary as we don't want to predict start", "central image patch) return: a numpy array of size (len(image_paths), 224, 224, 3)", "not currently doing this, which is fine since original paper doesn't) 5. return", "randomly sample a 224 * 224 *3 patch of the image 4. normalize", "WINDOW_SIZE = 20 def preprocess_image(image_path, is_train=True): \"\"\" 1. read image 2. resize image", "start_r = 0 start_c = 0 if is_train: start_r = random.randint(0, newshape[0]-224) start_c", "patch) return: a numpy array of size (len(image_paths), 224, 224, 3) containing the", "of paths to image locations (such as a length batch_size slice of a", "read image 2. resize image to 256 * 256 * 3 3. randomly", "images for training (sample random image patch) or False if processing for testing", "[PAD_TOKEN] * ( WINDOW_SIZE - len(padded_ing) - 1) padded_ingredients_list.append(padded_ing) return padded_ingredients_list def convert_to_id(vocab,", "import io, transform, img_as_float32 import random START_TOKEN = \"*START*\" # no index as", "3 3. 
randomly sample a 224 * 224 *3 patch of the image", "+ padded_ing + [STOP_TOKEN] + [PAD_TOKEN] * ( WINDOW_SIZE - len(padded_ing) - 1)", "is_train: True if processing images for training (sample random image patch) or False", "= ingredients[i] train_ingredient_list = [] test_ingredient_list = [] train_image_paths = [] test_image_paths =", "array of size (len(image_paths), 224, 224, 3) containing the preprocessed images \"\"\" return", "of the image 4. normalize image intensity? (NOTE: not currently doing this, which", "images, train_image_path, test_image_path): \"\"\" \"\"\" class_file = open(classes_path, \"r\") classes = [] for", "for word in name: str += word + \" \" str = str[:-1]", "numpy array of size (len(image_paths), 224, 224, 3) containing the preprocessed images \"\"\"", "we don't want to predict start STOP_TOKEN = \"*STOP*\" #Index: 0 PAD_TOKEN =", "float(h)), 256, c) elif h < w: newshape = (256, int((256.0 / float(h))", "image[start_r:start_r+224, start_c:start_c+224, :] return image def get_image_batch(image_paths, is_train=True): \"\"\" param image_paths: a list", "classes.append(line.rstrip().lower()) ingredients_file = open(ingredients_path, \"r\") ingredients = [] for line in ingredients_file: ingredients.append(line.rstrip().lower())", "random.randint(0, newshape[0]-224) start_c = random.randint(0, newshape[1]-224) else: start_r = (newshape[0] - 224) //", "anti_aliasing=True) start_r = 0 start_c = 0 if is_train: start_r = random.randint(0, newshape[0]-224)", "+ test_ingredient_list) padded_train_ingredients = np.array(pad_ingredients(train_ingredient_list)) padded_test_ingredients = np.array(pad_ingredients(test_ingredient_list)) train_ingredients = convert_to_id(vocab, padded_train_ingredients) test_ingredients", "batch_size slice of a larger list) param is_train: True if processing images for", "i, word in enumerate(all_words): vocab[word] = i + 3 return vocab def get_data(classes_path,", "test_image_path): \"\"\" \"\"\" class_file = 
open(classes_path, \"r\") classes = [] for line in", "# no index as start token is not added to vocabulary as we", "= io.imread(image_path) h,w,c = image.shape newshape = (256,256,3) if h > w: newshape", "if word in vocab else vocab[UNK_TOKEN] for word in sentence] for sentence in", "is_train) for path in image_paths], axis=0) def pad_ingredients(ingredient_list): \"\"\" \"\"\" padded_ingredients_list = []", "ingredients_dict[classes[i]] = ingredients[i] train_ingredient_list = [] test_ingredient_list = [] train_image_paths = [] test_image_paths", "\"\"\" return np.stack([preprocess_image(path, is_train) for path in image_paths], axis=0) def pad_ingredients(ingredient_list): \"\"\" \"\"\"", "w: newshape = (int((256.0 / float(w)) * float(h)), 256, c) elif h <", "image to 256 * 256 * 3 3. randomly sample a 224 *", "+ [STOP_TOKEN] + [PAD_TOKEN] * ( WINDOW_SIZE - len(padded_ing) - 1) padded_ingredients_list.append(padded_ing) return", "slice of a larger list) param is_train: True if processing images for training", "line in train_file: splitline = line.rstrip().split('/') train_image_paths.append(os.path.join(images, splitline[0], splitline[1])) for line in test_file:", "( WINDOW_SIZE - len(padded_ing) - 1) padded_ingredients_list.append(padded_ing) return padded_ingredients_list def convert_to_id(vocab, sentences): \"\"\"", "start_c:start_c+224, :] return image def get_image_batch(image_paths, is_train=True): \"\"\" param image_paths: a list of", "= [START_TOKEN] + padded_ing + [STOP_TOKEN] + [PAD_TOKEN] * ( WINDOW_SIZE - len(padded_ing)", "axis=0) def pad_ingredients(ingredient_list): \"\"\" \"\"\" padded_ingredients_list = [] for line in ingredient_list: line", "return: a numpy array of size (len(image_paths), 224, 224, 3) containing the preprocessed", "\"\"\" return np.stack([[vocab[word] if word in vocab else vocab[UNK_TOKEN] for word in sentence]", "\"\"\" vocab = {STOP_TOKEN: 0, PAD_TOKEN: 1, UNK_TOKEN: 2} tokens = [] for", "of size (len(image_paths), 224, 224, 
3) containing the preprocessed images \"\"\" return np.stack([preprocess_image(path,", "str = str[:-1] if str in ingredients_dict: pth = os.path.join(r, file) if pth", "image_paths], axis=0) def pad_ingredients(ingredient_list): \"\"\" \"\"\" padded_ingredients_list = [] for line in ingredient_list:", "line in class_file: classes.append(line.rstrip().lower()) ingredients_file = open(ingredients_path, \"r\") ingredients = [] for line", "padded_ing + [STOP_TOKEN] + [PAD_TOKEN] * ( WINDOW_SIZE - len(padded_ing) - 1) padded_ingredients_list.append(padded_ing)", "a larger list) param is_train: True if processing images for training (sample random", "\"r\") ingredients = [] for line in ingredients_file: ingredients.append(line.rstrip().lower()) ingredients_dict = {} for", "test_image_paths = [] train_file = open(train_image_path, \"r\") test_file = open(test_image_path, \"r\") for line", "is fine since original paper doesn't) 5. return processed image \"\"\" image =", "224) // 2 start_c = (newshape[1] - 224) // 2 image = image[start_r:start_r+224,", "train_image_paths = [] test_image_paths = [] train_file = open(train_image_path, \"r\") test_file = open(test_image_path,", "= [] train_file = open(train_image_path, \"r\") test_file = open(test_image_path, \"r\") for line in", "in test_image_paths: test_ingredient_list.append(ingredients_dict[str].split(\",\")) vocab = build_vocab(train_ingredient_list + test_ingredient_list) padded_train_ingredients = np.array(pad_ingredients(train_ingredient_list)) padded_test_ingredients =", "0 PAD_TOKEN = \"*<PASSWORD>*\" #Index: 1 UNK_TOKEN = \"*UNK*\" #Index: 2 WINDOW_SIZE =", "predict start STOP_TOKEN = \"*STOP*\" #Index: 0 PAD_TOKEN = \"*<PASSWORD>*\" #Index: 1 UNK_TOKEN", "if processing images for training (sample random image patch) or False if processing", "= (newshape[1] - 224) // 2 image = image[start_r:start_r+224, start_c:start_c+224, :] return image", "not added to vocabulary as we don't want to predict start STOP_TOKEN =", 
"if h > w: newshape = (int((256.0 / float(w)) * float(h)), 256, c)", "ingredients: tokens.extend(i) all_words = sorted(list(set(tokens))) for i, word in enumerate(all_words): vocab[word] = i", "all_words = sorted(list(set(tokens))) for i, word in enumerate(all_words): vocab[word] = i + 3", "open(ingredients_path, \"r\") ingredients = [] for line in ingredients_file: ingredients.append(line.rstrip().lower()) ingredients_dict = {}", "// 2 image = image[start_r:start_r+224, start_c:start_c+224, :] return image def get_image_batch(image_paths, is_train=True): \"\"\"", "for line in test_file: splitline = line.rstrip().split('/') test_image_paths.append(os.path.join(images, splitline[0], splitline[1])) for r, d,", "is_train=True): \"\"\" param image_paths: a list of paths to image locations (such as", "= [] for line in ingredient_list: line = list(set(line)) padded_ing = line[:(WINDOW_SIZE -", "256, c) elif h < w: newshape = (256, int((256.0 / float(h)) *", "class_file = open(classes_path, \"r\") classes = [] for line in class_file: classes.append(line.rstrip().lower()) ingredients_file", "= (256,256,3) if h > w: newshape = (int((256.0 / float(w)) * float(h)),", "<filename>src/preprocess_clean.py import sys, os import numpy as np from PIL import Image from", "1. read image 2. resize image to 256 * 256 * 3 3.", "word in vocab else vocab[UNK_TOKEN] for word in sentence] for sentence in sentences])", "for line in ingredients_file: ingredients.append(line.rstrip().lower()) ingredients_dict = {} for i in range(len(ingredients)): ingredients_dict[classes[i]]", "as start token is not added to vocabulary as we don't want to", "list of paths to image locations (such as a length batch_size slice of", "3) containing the preprocessed images \"\"\" return np.stack([preprocess_image(path, is_train) for path in image_paths],", "= [] test_image_paths = [] train_file = open(train_image_path, \"r\") test_file = open(test_image_path, \"r\")", "original paper doesn't) 5. 
return processed image \"\"\" image = io.imread(image_path) h,w,c =", "def build_vocab(ingredients): \"\"\" \"\"\" vocab = {STOP_TOKEN: 0, PAD_TOKEN: 1, UNK_TOKEN: 2} tokens", "in ingredients_file: ingredients.append(line.rstrip().lower()) ingredients_dict = {} for i in range(len(ingredients)): ingredients_dict[classes[i]] = ingredients[i]", "testing (sample central image patch) return: a numpy array of size (len(image_paths), 224,", "\"\"\" image = io.imread(image_path) h,w,c = image.shape newshape = (256,256,3) if h >", "= \"*UNK*\" #Index: 2 WINDOW_SIZE = 20 def preprocess_image(image_path, is_train=True): \"\"\" 1. read", "get_image_batch(image_paths, is_train=True): \"\"\" param image_paths: a list of paths to image locations (such", "to vocabulary as we don't want to predict start STOP_TOKEN = \"*STOP*\" #Index:", "range(len(ingredients)): ingredients_dict[classes[i]] = ingredients[i] train_ingredient_list = [] test_ingredient_list = [] train_image_paths = []", "padded_ingredients_list = [] for line in ingredient_list: line = list(set(line)) padded_ing = line[:(WINDOW_SIZE", "UNK_TOKEN: 2} tokens = [] for i in ingredients: tokens.extend(i) all_words = sorted(list(set(tokens)))", "= open(test_image_path, \"r\") for line in train_file: splitline = line.rstrip().split('/') train_image_paths.append(os.path.join(images, splitline[0], splitline[1]))", "return vocab def get_data(classes_path, ingredients_path, images, train_image_path, test_image_path): \"\"\" \"\"\" class_file = open(classes_path,", "vocab else vocab[UNK_TOKEN] for word in sentence] for sentence in sentences]) def build_vocab(ingredients):", "start_r = random.randint(0, newshape[0]-224) start_c = random.randint(0, newshape[1]-224) else: start_r = (newshape[0] -", "False if processing for testing (sample central image patch) return: a numpy array", "+ \" \" str = str[:-1] if str in ingredients_dict: pth = os.path.join(r,", "= 0 if is_train: start_r = random.randint(0, newshape[0]-224) start_c = 
random.randint(0, newshape[1]-224) else:", "+ [PAD_TOKEN] * ( WINDOW_SIZE - len(padded_ing) - 1) padded_ingredients_list.append(padded_ing) return padded_ingredients_list def", "for sentence in sentences]) def build_vocab(ingredients): \"\"\" \"\"\" vocab = {STOP_TOKEN: 0, PAD_TOKEN:", "* 3 3. randomly sample a 224 * 224 *3 patch of the", "= {} for i in range(len(ingredients)): ingredients_dict[classes[i]] = ingredients[i] train_ingredient_list = [] test_ingredient_list", "training (sample random image patch) or False if processing for testing (sample central", "def get_data(classes_path, ingredients_path, images, train_image_path, test_image_path): \"\"\" \"\"\" class_file = open(classes_path, \"r\") classes", "test_ingredient_list = [] train_image_paths = [] test_image_paths = [] train_file = open(train_image_path, \"r\")", "for path in image_paths], axis=0) def pad_ingredients(ingredient_list): \"\"\" \"\"\" padded_ingredients_list = [] for", "in ingredient_list: line = list(set(line)) padded_ing = line[:(WINDOW_SIZE - 2)] padded_ing = [START_TOKEN]", "in image_paths], axis=0) def pad_ingredients(ingredient_list): \"\"\" \"\"\" padded_ingredients_list = [] for line in", "image = image[start_r:start_r+224, start_c:start_c+224, :] return image def get_image_batch(image_paths, is_train=True): \"\"\" param image_paths:", "from skimage import io, transform, img_as_float32 import random START_TOKEN = \"*START*\" # no", "elif pth in test_image_paths: test_ingredient_list.append(ingredients_dict[str].split(\",\")) vocab = build_vocab(train_ingredient_list + test_ingredient_list) padded_train_ingredients = np.array(pad_ingredients(train_ingredient_list))", "class_file: classes.append(line.rstrip().lower()) ingredients_file = open(ingredients_path, \"r\") ingredients = [] for line in ingredients_file:", "fine since original paper doesn't) 5. 
return processed image \"\"\" image = io.imread(image_path)", "h,w,c = image.shape newshape = (256,256,3) if h > w: newshape = (int((256.0", "- len(padded_ing) - 1) padded_ingredients_list.append(padded_ing) return padded_ingredients_list def convert_to_id(vocab, sentences): \"\"\" \"\"\" return", "PIL import Image from skimage import io, transform, img_as_float32 import random START_TOKEN =", "\"\"\" \"\"\" class_file = open(classes_path, \"r\") classes = [] for line in class_file:", "to predict start STOP_TOKEN = \"*STOP*\" #Index: 0 PAD_TOKEN = \"*<PASSWORD>*\" #Index: 1", "padded_test_ingredients = np.array(pad_ingredients(test_ingredient_list)) train_ingredients = convert_to_id(vocab, padded_train_ingredients) test_ingredients = convert_to_id(vocab, padded_test_ingredients) return train_image_paths,", "line in ingredients_file: ingredients.append(line.rstrip().lower()) ingredients_dict = {} for i in range(len(ingredients)): ingredients_dict[classes[i]] =", "test_file = open(test_image_path, \"r\") for line in train_file: splitline = line.rstrip().split('/') train_image_paths.append(os.path.join(images, splitline[0],", "length batch_size slice of a larger list) param is_train: True if processing images", "line[:(WINDOW_SIZE - 2)] padded_ing = [START_TOKEN] + padded_ing + [STOP_TOKEN] + [PAD_TOKEN] *", "transform, img_as_float32 import random START_TOKEN = \"*START*\" # no index as start token", "if processing for testing (sample central image patch) return: a numpy array of", "get_data(classes_path, ingredients_path, images, train_image_path, test_image_path): \"\"\" \"\"\" class_file = open(classes_path, \"r\") classes =", "\"r\") test_file = open(test_image_path, \"r\") for line in train_file: splitline = line.rstrip().split('/') train_image_paths.append(os.path.join(images,", "sentence in sentences]) def build_vocab(ingredients): \"\"\" \"\"\" vocab = {STOP_TOKEN: 0, PAD_TOKEN: 1,", "- 224) // 2 image = image[start_r:start_r+224, start_c:start_c+224, :] 
return image def get_image_batch(image_paths,", "sentences): \"\"\" \"\"\" return np.stack([[vocab[word] if word in vocab else vocab[UNK_TOKEN] for word", "param image_paths: a list of paths to image locations (such as a length", "padded_ingredients_list.append(padded_ing) return padded_ingredients_list def convert_to_id(vocab, sentences): \"\"\" \"\"\" return np.stack([[vocab[word] if word in", "train_ingredients = convert_to_id(vocab, padded_train_ingredients) test_ingredients = convert_to_id(vocab, padded_test_ingredients) return train_image_paths, train_ingredients, test_image_paths, test_ingredients,", "sentences]) def build_vocab(ingredients): \"\"\" \"\"\" vocab = {STOP_TOKEN: 0, PAD_TOKEN: 1, UNK_TOKEN: 2}", "1) padded_ingredients_list.append(padded_ing) return padded_ingredients_list def convert_to_id(vocab, sentences): \"\"\" \"\"\" return np.stack([[vocab[word] if word", "\"\"\" 1. read image 2. resize image to 256 * 256 * 3", "c) image = transform.resize(image, newshape, anti_aliasing=True) start_r = 0 start_c = 0 if", "w: newshape = (256, int((256.0 / float(h)) * float(w)), c) image = transform.resize(image,", "return np.stack([[vocab[word] if word in vocab else vocab[UNK_TOKEN] for word in sentence] for", "= line.rstrip().split('/') test_image_paths.append(os.path.join(images, splitline[0], splitline[1])) for r, d, f in os.walk(images): for file", "ingredients.append(line.rstrip().lower()) ingredients_dict = {} for i in range(len(ingredients)): ingredients_dict[classes[i]] = ingredients[i] train_ingredient_list =", "= [] for line in ingredients_file: ingredients.append(line.rstrip().lower()) ingredients_dict = {} for i in", "float(h)) * float(w)), c) image = transform.resize(image, newshape, anti_aliasing=True) start_r = 0 start_c", "locations (such as a length batch_size slice of a larger list) param is_train:", "for training (sample random image patch) or False if processing for testing (sample", "int((256.0 / float(h)) * float(w)), c) image = 
transform.resize(image, newshape, anti_aliasing=True) start_r =", "ingredients = [] for line in ingredients_file: ingredients.append(line.rstrip().lower()) ingredients_dict = {} for i", "\"\"\" class_file = open(classes_path, \"r\") classes = [] for line in class_file: classes.append(line.rstrip().lower())", "(int((256.0 / float(w)) * float(h)), 256, c) elif h < w: newshape =", "start STOP_TOKEN = \"*STOP*\" #Index: 0 PAD_TOKEN = \"*<PASSWORD>*\" #Index: 1 UNK_TOKEN =", "splitline[0], splitline[1])) for line in test_file: splitline = line.rstrip().split('/') test_image_paths.append(os.path.join(images, splitline[0], splitline[1])) for", "open(train_image_path, \"r\") test_file = open(test_image_path, \"r\") for line in train_file: splitline = line.rstrip().split('/')", "random image patch) or False if processing for testing (sample central image patch)", "= \"*START*\" # no index as start token is not added to vocabulary", "image intensity? (NOTE: not currently doing this, which is fine since original paper", "path in image_paths], axis=0) def pad_ingredients(ingredient_list): \"\"\" \"\"\" padded_ingredients_list = [] for line", "newshape, anti_aliasing=True) start_r = 0 start_c = 0 if is_train: start_r = random.randint(0,", "line.rstrip().split('/') train_image_paths.append(os.path.join(images, splitline[0], splitline[1])) for line in test_file: splitline = line.rstrip().split('/') test_image_paths.append(os.path.join(images, splitline[0],", "io, transform, img_as_float32 import random START_TOKEN = \"*START*\" # no index as start", "\"\"\" padded_ingredients_list = [] for line in ingredient_list: line = list(set(line)) padded_ing =", "+= word + \" \" str = str[:-1] if str in ingredients_dict: pth", "*3 patch of the image 4. normalize image intensity? (NOTE: not currently doing", "\" \" str = str[:-1] if str in ingredients_dict: pth = os.path.join(r, file)", "* float(h)), 256, c) elif h < w: newshape = (256, int((256.0 /", "2. resize image to 256 * 256 * 3 3. 
randomly sample a", "5. return processed image \"\"\" image = io.imread(image_path) h,w,c = image.shape newshape =", "[] test_ingredient_list = [] train_image_paths = [] test_image_paths = [] train_file = open(train_image_path,", "build_vocab(train_ingredient_list + test_ingredient_list) padded_train_ingredients = np.array(pad_ingredients(train_ingredient_list)) padded_test_ingredients = np.array(pad_ingredients(test_ingredient_list)) train_ingredients = convert_to_id(vocab, padded_train_ingredients)", "2 WINDOW_SIZE = 20 def preprocess_image(image_path, is_train=True): \"\"\" 1. read image 2. resize", "3 return vocab def get_data(classes_path, ingredients_path, images, train_image_path, test_image_path): \"\"\" \"\"\" class_file =", "index as start token is not added to vocabulary as we don't want", "processing images for training (sample random image patch) or False if processing for", "= [] train_image_paths = [] test_image_paths = [] train_file = open(train_image_path, \"r\") test_file", "newshape[1]-224) else: start_r = (newshape[0] - 224) // 2 start_c = (newshape[1] -", "paths to image locations (such as a length batch_size slice of a larger", "sorted(list(set(tokens))) for i, word in enumerate(all_words): vocab[word] = i + 3 return vocab", "added to vocabulary as we don't want to predict start STOP_TOKEN = \"*STOP*\"", "= [] test_ingredient_list = [] train_image_paths = [] test_image_paths = [] train_file =", "224) // 2 image = image[start_r:start_r+224, start_c:start_c+224, :] return image def get_image_batch(image_paths, is_train=True):", "Image from skimage import io, transform, img_as_float32 import random START_TOKEN = \"*START*\" #", "for line in train_file: splitline = line.rstrip().split('/') train_image_paths.append(os.path.join(images, splitline[0], splitline[1])) for line in", "start_c = (newshape[1] - 224) // 2 image = image[start_r:start_r+224, start_c:start_c+224, :] return", "224, 3) containing the preprocessed images \"\"\" return 
np.stack([preprocess_image(path, is_train) for path in", "patch) or False if processing for testing (sample central image patch) return: a", "i in range(len(ingredients)): ingredients_dict[classes[i]] = ingredients[i] train_ingredient_list = [] test_ingredient_list = [] train_image_paths", "224 *3 patch of the image 4. normalize image intensity? (NOTE: not currently", "str in ingredients_dict: pth = os.path.join(r, file) if pth in train_image_paths: train_ingredient_list.append(ingredients_dict[str].split(\",\")) elif", "#Index: 1 UNK_TOKEN = \"*UNK*\" #Index: 2 WINDOW_SIZE = 20 def preprocess_image(image_path, is_train=True):", "= {STOP_TOKEN: 0, PAD_TOKEN: 1, UNK_TOKEN: 2} tokens = [] for i in", "in os.walk(images): for file in f: name = file.split(\"_\")[1:-1] str = \"\" for", "- 1) padded_ingredients_list.append(padded_ing) return padded_ingredients_list def convert_to_id(vocab, sentences): \"\"\" \"\"\" return np.stack([[vocab[word] if", "for i in range(len(ingredients)): ingredients_dict[classes[i]] = ingredients[i] train_ingredient_list = [] test_ingredient_list = []", "open(classes_path, \"r\") classes = [] for line in class_file: classes.append(line.rstrip().lower()) ingredients_file = open(ingredients_path,", "start_r = (newshape[0] - 224) // 2 start_c = (newshape[1] - 224) //", "in test_file: splitline = line.rstrip().split('/') test_image_paths.append(os.path.join(images, splitline[0], splitline[1])) for r, d, f in", "image 4. normalize image intensity? 
(NOTE: not currently doing this, which is fine", "in vocab else vocab[UNK_TOKEN] for word in sentence] for sentence in sentences]) def", "= open(train_image_path, \"r\") test_file = open(test_image_path, \"r\") for line in train_file: splitline =", "train_file: splitline = line.rstrip().split('/') train_image_paths.append(os.path.join(images, splitline[0], splitline[1])) for line in test_file: splitline =", "PAD_TOKEN = \"*<PASSWORD>*\" #Index: 1 UNK_TOKEN = \"*UNK*\" #Index: 2 WINDOW_SIZE = 20", "\"\"\" \"\"\" padded_ingredients_list = [] for line in ingredient_list: line = list(set(line)) padded_ing", "= line.rstrip().split('/') train_image_paths.append(os.path.join(images, splitline[0], splitline[1])) for line in test_file: splitline = line.rstrip().split('/') test_image_paths.append(os.path.join(images,", "= \"\" for word in name: str += word + \" \" str", "/ float(w)) * float(h)), 256, c) elif h < w: newshape = (256,", "WINDOW_SIZE - len(padded_ing) - 1) padded_ingredients_list.append(padded_ing) return padded_ingredients_list def convert_to_id(vocab, sentences): \"\"\" \"\"\"", "return padded_ingredients_list def convert_to_id(vocab, sentences): \"\"\" \"\"\" return np.stack([[vocab[word] if word in vocab", "{STOP_TOKEN: 0, PAD_TOKEN: 1, UNK_TOKEN: 2} tokens = [] for i in ingredients:", "enumerate(all_words): vocab[word] = i + 3 return vocab def get_data(classes_path, ingredients_path, images, train_image_path,", "convert_to_id(vocab, sentences): \"\"\" \"\"\" return np.stack([[vocab[word] if word in vocab else vocab[UNK_TOKEN] for", "file) if pth in train_image_paths: train_ingredient_list.append(ingredients_dict[str].split(\",\")) elif pth in test_image_paths: test_ingredient_list.append(ingredients_dict[str].split(\",\")) vocab =", "import numpy as np from PIL import Image from skimage import io, transform,", "io.imread(image_path) h,w,c = image.shape newshape = (256,256,3) if h > w: newshape =", "3. 
randomly sample a 224 * 224 *3 patch of the image 4.", "[] train_file = open(train_image_path, \"r\") test_file = open(test_image_path, \"r\") for line in train_file:", "PAD_TOKEN: 1, UNK_TOKEN: 2} tokens = [] for i in ingredients: tokens.extend(i) all_words", "padded_ingredients_list def convert_to_id(vocab, sentences): \"\"\" \"\"\" return np.stack([[vocab[word] if word in vocab else", "ingredients_path, images, train_image_path, test_image_path): \"\"\" \"\"\" class_file = open(classes_path, \"r\") classes = []", "[] for line in ingredient_list: line = list(set(line)) padded_ing = line[:(WINDOW_SIZE - 2)]", "in enumerate(all_words): vocab[word] = i + 3 return vocab def get_data(classes_path, ingredients_path, images,", "1, UNK_TOKEN: 2} tokens = [] for i in ingredients: tokens.extend(i) all_words =", "a length batch_size slice of a larger list) param is_train: True if processing", "(newshape[0] - 224) // 2 start_c = (newshape[1] - 224) // 2 image", "os.walk(images): for file in f: name = file.split(\"_\")[1:-1] str = \"\" for word", "np.array(pad_ingredients(train_ingredient_list)) padded_test_ingredients = np.array(pad_ingredients(test_ingredient_list)) train_ingredients = convert_to_id(vocab, padded_train_ingredients) test_ingredients = convert_to_id(vocab, padded_test_ingredients) return", "> w: newshape = (int((256.0 / float(w)) * float(h)), 256, c) elif h", "ingredients[i] train_ingredient_list = [] test_ingredient_list = [] train_image_paths = [] test_image_paths = []", "import Image from skimage import io, transform, img_as_float32 import random START_TOKEN = \"*START*\"", "name: str += word + \" \" str = str[:-1] if str in", "r, d, f in os.walk(images): for file in f: name = file.split(\"_\")[1:-1] str", "\"\"\" \"\"\" return np.stack([[vocab[word] if word in vocab else vocab[UNK_TOKEN] for word in", "doesn't) 5. 
return processed image \"\"\" image = io.imread(image_path) h,w,c = image.shape newshape", "as we don't want to predict start STOP_TOKEN = \"*STOP*\" #Index: 0 PAD_TOKEN", "numpy as np from PIL import Image from skimage import io, transform, img_as_float32", "UNK_TOKEN = \"*UNK*\" #Index: 2 WINDOW_SIZE = 20 def preprocess_image(image_path, is_train=True): \"\"\" 1.", "no index as start token is not added to vocabulary as we don't", "(len(image_paths), 224, 224, 3) containing the preprocessed images \"\"\" return np.stack([preprocess_image(path, is_train) for", "newshape = (256,256,3) if h > w: newshape = (int((256.0 / float(w)) *", "\"*START*\" # no index as start token is not added to vocabulary as", "intensity? (NOTE: not currently doing this, which is fine since original paper doesn't)", "(such as a length batch_size slice of a larger list) param is_train: True", "if pth in train_image_paths: train_ingredient_list.append(ingredients_dict[str].split(\",\")) elif pth in test_image_paths: test_ingredient_list.append(ingredients_dict[str].split(\",\")) vocab = build_vocab(train_ingredient_list", "image \"\"\" image = io.imread(image_path) h,w,c = image.shape newshape = (256,256,3) if h", "= image[start_r:start_r+224, start_c:start_c+224, :] return image def get_image_batch(image_paths, is_train=True): \"\"\" param image_paths: a", "= (int((256.0 / float(w)) * float(h)), 256, c) elif h < w: newshape", "224, 224, 3) containing the preprocessed images \"\"\" return np.stack([preprocess_image(path, is_train) for path", "larger list) param is_train: True if processing images for training (sample random image", "else vocab[UNK_TOKEN] for word in sentence] for sentence in sentences]) def build_vocab(ingredients): \"\"\"", "sentence] for sentence in sentences]) def build_vocab(ingredients): \"\"\" \"\"\" vocab = {STOP_TOKEN: 0,", "pad_ingredients(ingredient_list): \"\"\" \"\"\" padded_ingredients_list = [] for line in ingredient_list: line = list(set(line))", "def 
get_image_batch(image_paths, is_train=True): \"\"\" param image_paths: a list of paths to image locations", "True if processing images for training (sample random image patch) or False if", "line in test_file: splitline = line.rstrip().split('/') test_image_paths.append(os.path.join(images, splitline[0], splitline[1])) for r, d, f", "train_ingredient_list.append(ingredients_dict[str].split(\",\")) elif pth in test_image_paths: test_ingredient_list.append(ingredients_dict[str].split(\",\")) vocab = build_vocab(train_ingredient_list + test_ingredient_list) padded_train_ingredients =", "in class_file: classes.append(line.rstrip().lower()) ingredients_file = open(ingredients_path, \"r\") ingredients = [] for line in", "ingredients_dict = {} for i in range(len(ingredients)): ingredients_dict[classes[i]] = ingredients[i] train_ingredient_list = []", "in train_file: splitline = line.rstrip().split('/') train_image_paths.append(os.path.join(images, splitline[0], splitline[1])) for line in test_file: splitline", "doing this, which is fine since original paper doesn't) 5. return processed image", "#Index: 2 WINDOW_SIZE = 20 def preprocess_image(image_path, is_train=True): \"\"\" 1. read image 2.", "return processed image \"\"\" image = io.imread(image_path) h,w,c = image.shape newshape = (256,256,3)", "256 * 3 3. randomly sample a 224 * 224 *3 patch of", "splitline = line.rstrip().split('/') train_image_paths.append(os.path.join(images, splitline[0], splitline[1])) for line in test_file: splitline = line.rstrip().split('/')", "to 256 * 256 * 3 3. 
randomly sample a 224 * 224", "pth in train_image_paths: train_ingredient_list.append(ingredients_dict[str].split(\",\")) elif pth in test_image_paths: test_ingredient_list.append(ingredients_dict[str].split(\",\")) vocab = build_vocab(train_ingredient_list +", "start token is not added to vocabulary as we don't want to predict", "word in enumerate(all_words): vocab[word] = i + 3 return vocab def get_data(classes_path, ingredients_path,", "= convert_to_id(vocab, padded_train_ingredients) test_ingredients = convert_to_id(vocab, padded_test_ingredients) return train_image_paths, train_ingredients, test_image_paths, test_ingredients, vocab", "= line[:(WINDOW_SIZE - 2)] padded_ing = [START_TOKEN] + padded_ing + [STOP_TOKEN] + [PAD_TOKEN]", "vocab = {STOP_TOKEN: 0, PAD_TOKEN: 1, UNK_TOKEN: 2} tokens = [] for i", "np.array(pad_ingredients(test_ingredient_list)) train_ingredients = convert_to_id(vocab, padded_train_ingredients) test_ingredients = convert_to_id(vocab, padded_test_ingredients) return train_image_paths, train_ingredients, test_image_paths,", "= (newshape[0] - 224) // 2 start_c = (newshape[1] - 224) // 2", "0 if is_train: start_r = random.randint(0, newshape[0]-224) start_c = random.randint(0, newshape[1]-224) else: start_r", "vocabulary as we don't want to predict start STOP_TOKEN = \"*STOP*\" #Index: 0", "in f: name = file.split(\"_\")[1:-1] str = \"\" for word in name: str", "= list(set(line)) padded_ing = line[:(WINDOW_SIZE - 2)] padded_ing = [START_TOKEN] + padded_ing +", "2 image = image[start_r:start_r+224, start_c:start_c+224, :] return image def get_image_batch(image_paths, is_train=True): \"\"\" param", "in sentences]) def build_vocab(ingredients): \"\"\" \"\"\" vocab = {STOP_TOKEN: 0, PAD_TOKEN: 1, UNK_TOKEN:", "list) param is_train: True if processing images for training (sample random image patch)", "want to predict start STOP_TOKEN = \"*STOP*\" #Index: 0 PAD_TOKEN = \"*<PASSWORD>*\" #Index:" ]
[ "self.get_view_response_json() assert self.expected == result def test_get_with_create_field(self): self.expected['results'].append({ 'text': 'Create \"foo\"', 'id': 'foo',", "**kwargs): view = autocomplete.Select2QuerySetSequenceView( queryset=autocomplete.QuerySetSequence( Group.objects.all(), ), paginate_by=2, **kwargs ) view.request = self.request", ") view.request = self.request return view def get_view_response(self, **view_kwargs): return self.get_view(**view_kwargs).dispatch(self.request) def get_view_response_json(self,", "self.expected == result def test_get_with_create_field(self): self.expected['results'].append({ 'text': 'Create \"foo\"', 'id': 'foo', 'create_id': True", "get_view_response_json(self, **view_kwargs): return json.loads(self.get_view_response(**view_kwargs).content) def test_get(self): result = self.get_view_response_json() assert self.expected == result", "**kwargs ) view.request = self.request return view def get_view_response(self, **view_kwargs): return self.get_view(**view_kwargs).dispatch(self.request) def", "return json.loads(self.get_view_response(**view_kwargs).content) def test_get(self): result = self.get_view_response_json() assert self.expected == result def test_get_with_create_field(self):", "view.request = self.request return view def get_view_response(self, **view_kwargs): return self.get_view(**view_kwargs).dispatch(self.request) def get_view_response_json(self, **view_kwargs):", "= autocomplete.Select2QuerySetSequenceView( queryset=autocomplete.QuerySetSequence( Group.objects.all(), ), paginate_by=2, **kwargs ) view.request = self.request return view", "test_get(self): result = self.get_view_response_json() assert self.expected == result def test_get_with_create_field(self): self.expected['results'].append({ 'text': 'Create", "'pagination': { 'more': False }, 'results': [] } @classmethod def setUpClass(cls): for i", "Group class Select2QuerySetSequenceViewTestCase(test.TestCase): def setUp(self): self.expected = { 
'pagination': { 'more': False },", "== result def test_get_with_create_field(self): self.expected['results'].append({ 'text': 'Create \"foo\"', 'id': 'foo', 'create_id': True })", "super(Select2QuerySetSequenceViewTestCase, cls).setUpClass() def get_view(self, **kwargs): view = autocomplete.Select2QuerySetSequenceView( queryset=autocomplete.QuerySetSequence( Group.objects.all(), ), paginate_by=2, **kwargs", "test.RequestFactory().get('?q=foo') super(Select2QuerySetSequenceViewTestCase, cls).setUpClass() def get_view(self, **kwargs): view = autocomplete.Select2QuerySetSequenceView( queryset=autocomplete.QuerySetSequence( Group.objects.all(), ), paginate_by=2,", "for i in range(0, 3): Group.objects.create(name='ViewTestCase%s' % i) cls.request = test.RequestFactory().get('?q=foo') super(Select2QuerySetSequenceViewTestCase, cls).setUpClass()", "Group.objects.create(name='ViewTestCase%s' % i) cls.request = test.RequestFactory().get('?q=foo') super(Select2QuerySetSequenceViewTestCase, cls).setUpClass() def get_view(self, **kwargs): view =", "test from django.contrib.auth.models import Group class Select2QuerySetSequenceViewTestCase(test.TestCase): def setUp(self): self.expected = { 'pagination':", "django.contrib.auth.models import Group class Select2QuerySetSequenceViewTestCase(test.TestCase): def setUp(self): self.expected = { 'pagination': { 'more':", "from django import test from django.contrib.auth.models import Group class Select2QuerySetSequenceViewTestCase(test.TestCase): def setUp(self): self.expected", "= test.RequestFactory().get('?q=foo') super(Select2QuerySetSequenceViewTestCase, cls).setUpClass() def get_view(self, **kwargs): view = autocomplete.Select2QuerySetSequenceView( queryset=autocomplete.QuerySetSequence( Group.objects.all(), ),", "def get_view_response(self, **view_kwargs): return self.get_view(**view_kwargs).dispatch(self.request) def get_view_response_json(self, **view_kwargs): return json.loads(self.get_view_response(**view_kwargs).content) def 
test_get(self): result", "'text': 'Create \"foo\"', 'id': 'foo', 'create_id': True }) result = self.get_view_response_json(create_field='name') assert self.expected", "return self.get_view(**view_kwargs).dispatch(self.request) def get_view_response_json(self, **view_kwargs): return json.loads(self.get_view_response(**view_kwargs).content) def test_get(self): result = self.get_view_response_json() assert", "<gh_stars>1000+ import json from dal import autocomplete from django import test from django.contrib.auth.models", "@classmethod def setUpClass(cls): for i in range(0, 3): Group.objects.create(name='ViewTestCase%s' % i) cls.request =", "json from dal import autocomplete from django import test from django.contrib.auth.models import Group", "i in range(0, 3): Group.objects.create(name='ViewTestCase%s' % i) cls.request = test.RequestFactory().get('?q=foo') super(Select2QuerySetSequenceViewTestCase, cls).setUpClass() def", "setUp(self): self.expected = { 'pagination': { 'more': False }, 'results': [] } @classmethod", "{ 'pagination': { 'more': False }, 'results': [] } @classmethod def setUpClass(cls): for", "autocomplete from django import test from django.contrib.auth.models import Group class Select2QuerySetSequenceViewTestCase(test.TestCase): def setUp(self):", "import test from django.contrib.auth.models import Group class Select2QuerySetSequenceViewTestCase(test.TestCase): def setUp(self): self.expected = {", "from django.contrib.auth.models import Group class Select2QuerySetSequenceViewTestCase(test.TestCase): def setUp(self): self.expected = { 'pagination': {", "import Group class Select2QuerySetSequenceViewTestCase(test.TestCase): def setUp(self): self.expected = { 'pagination': { 'more': False", "def get_view_response_json(self, **view_kwargs): return json.loads(self.get_view_response(**view_kwargs).content) def test_get(self): result = self.get_view_response_json() assert self.expected ==", "self.request return view def get_view_response(self, 
**view_kwargs): return self.get_view(**view_kwargs).dispatch(self.request) def get_view_response_json(self, **view_kwargs): return json.loads(self.get_view_response(**view_kwargs).content)", "json.loads(self.get_view_response(**view_kwargs).content) def test_get(self): result = self.get_view_response_json() assert self.expected == result def test_get_with_create_field(self): self.expected['results'].append({", "cls).setUpClass() def get_view(self, **kwargs): view = autocomplete.Select2QuerySetSequenceView( queryset=autocomplete.QuerySetSequence( Group.objects.all(), ), paginate_by=2, **kwargs )", "result = self.get_view_response_json() assert self.expected == result def test_get_with_create_field(self): self.expected['results'].append({ 'text': 'Create \"foo\"',", "= { 'pagination': { 'more': False }, 'results': [] } @classmethod def setUpClass(cls):", "class Select2QuerySetSequenceViewTestCase(test.TestCase): def setUp(self): self.expected = { 'pagination': { 'more': False }, 'results':", "self.expected['results'].append({ 'text': 'Create \"foo\"', 'id': 'foo', 'create_id': True }) result = self.get_view_response_json(create_field='name') assert", "def setUpClass(cls): for i in range(0, 3): Group.objects.create(name='ViewTestCase%s' % i) cls.request = test.RequestFactory().get('?q=foo')", "django import test from django.contrib.auth.models import Group class Select2QuerySetSequenceViewTestCase(test.TestCase): def setUp(self): self.expected =", "False }, 'results': [] } @classmethod def setUpClass(cls): for i in range(0, 3):", "setUpClass(cls): for i in range(0, 3): Group.objects.create(name='ViewTestCase%s' % i) cls.request = test.RequestFactory().get('?q=foo') super(Select2QuerySetSequenceViewTestCase,", "import json from dal import autocomplete from django import test from django.contrib.auth.models import", "from dal import autocomplete from django import test from django.contrib.auth.models import Group class", "'more': False }, 'results': [] } @classmethod 
def setUpClass(cls): for i in range(0,", "assert self.expected == result def test_get_with_create_field(self): self.expected['results'].append({ 'text': 'Create \"foo\"', 'id': 'foo', 'create_id':", "i) cls.request = test.RequestFactory().get('?q=foo') super(Select2QuerySetSequenceViewTestCase, cls).setUpClass() def get_view(self, **kwargs): view = autocomplete.Select2QuerySetSequenceView( queryset=autocomplete.QuerySetSequence(", "result def test_get_with_create_field(self): self.expected['results'].append({ 'text': 'Create \"foo\"', 'id': 'foo', 'create_id': True }) result", "get_view(self, **kwargs): view = autocomplete.Select2QuerySetSequenceView( queryset=autocomplete.QuerySetSequence( Group.objects.all(), ), paginate_by=2, **kwargs ) view.request =", "view = autocomplete.Select2QuerySetSequenceView( queryset=autocomplete.QuerySetSequence( Group.objects.all(), ), paginate_by=2, **kwargs ) view.request = self.request return", "range(0, 3): Group.objects.create(name='ViewTestCase%s' % i) cls.request = test.RequestFactory().get('?q=foo') super(Select2QuerySetSequenceViewTestCase, cls).setUpClass() def get_view(self, **kwargs):", "Group.objects.all(), ), paginate_by=2, **kwargs ) view.request = self.request return view def get_view_response(self, **view_kwargs):", "view def get_view_response(self, **view_kwargs): return self.get_view(**view_kwargs).dispatch(self.request) def get_view_response_json(self, **view_kwargs): return json.loads(self.get_view_response(**view_kwargs).content) def test_get(self):", "[] } @classmethod def setUpClass(cls): for i in range(0, 3): Group.objects.create(name='ViewTestCase%s' % i)", "self.get_view(**view_kwargs).dispatch(self.request) def get_view_response_json(self, **view_kwargs): return json.loads(self.get_view_response(**view_kwargs).content) def test_get(self): result = self.get_view_response_json() assert self.expected", "def test_get(self): result = self.get_view_response_json() assert self.expected == result def 
test_get_with_create_field(self): self.expected['results'].append({ 'text':", "% i) cls.request = test.RequestFactory().get('?q=foo') super(Select2QuerySetSequenceViewTestCase, cls).setUpClass() def get_view(self, **kwargs): view = autocomplete.Select2QuerySetSequenceView(", "dal import autocomplete from django import test from django.contrib.auth.models import Group class Select2QuerySetSequenceViewTestCase(test.TestCase):", "def test_get_with_create_field(self): self.expected['results'].append({ 'text': 'Create \"foo\"', 'id': 'foo', 'create_id': True }) result =", "3): Group.objects.create(name='ViewTestCase%s' % i) cls.request = test.RequestFactory().get('?q=foo') super(Select2QuerySetSequenceViewTestCase, cls).setUpClass() def get_view(self, **kwargs): view", "{ 'more': False }, 'results': [] } @classmethod def setUpClass(cls): for i in", "import autocomplete from django import test from django.contrib.auth.models import Group class Select2QuerySetSequenceViewTestCase(test.TestCase): def", "self.expected = { 'pagination': { 'more': False }, 'results': [] } @classmethod def", "'results': [] } @classmethod def setUpClass(cls): for i in range(0, 3): Group.objects.create(name='ViewTestCase%s' %", "= self.get_view_response_json() assert self.expected == result def test_get_with_create_field(self): self.expected['results'].append({ 'text': 'Create \"foo\"', 'id':", "def get_view(self, **kwargs): view = autocomplete.Select2QuerySetSequenceView( queryset=autocomplete.QuerySetSequence( Group.objects.all(), ), paginate_by=2, **kwargs ) view.request", "cls.request = test.RequestFactory().get('?q=foo') super(Select2QuerySetSequenceViewTestCase, cls).setUpClass() def get_view(self, **kwargs): view = autocomplete.Select2QuerySetSequenceView( queryset=autocomplete.QuerySetSequence( Group.objects.all(),", "} @classmethod def setUpClass(cls): for i in range(0, 3): Group.objects.create(name='ViewTestCase%s' % i) cls.request", "in range(0, 3): 
Group.objects.create(name='ViewTestCase%s' % i) cls.request = test.RequestFactory().get('?q=foo') super(Select2QuerySetSequenceViewTestCase, cls).setUpClass() def get_view(self,", "autocomplete.Select2QuerySetSequenceView( queryset=autocomplete.QuerySetSequence( Group.objects.all(), ), paginate_by=2, **kwargs ) view.request = self.request return view def", "), paginate_by=2, **kwargs ) view.request = self.request return view def get_view_response(self, **view_kwargs): return", "= self.request return view def get_view_response(self, **view_kwargs): return self.get_view(**view_kwargs).dispatch(self.request) def get_view_response_json(self, **view_kwargs): return", "def setUp(self): self.expected = { 'pagination': { 'more': False }, 'results': [] }", "test_get_with_create_field(self): self.expected['results'].append({ 'text': 'Create \"foo\"', 'id': 'foo', 'create_id': True }) result = self.get_view_response_json(create_field='name')", "queryset=autocomplete.QuerySetSequence( Group.objects.all(), ), paginate_by=2, **kwargs ) view.request = self.request return view def get_view_response(self,", "}, 'results': [] } @classmethod def setUpClass(cls): for i in range(0, 3): Group.objects.create(name='ViewTestCase%s'", "return view def get_view_response(self, **view_kwargs): return self.get_view(**view_kwargs).dispatch(self.request) def get_view_response_json(self, **view_kwargs): return json.loads(self.get_view_response(**view_kwargs).content) def", "'Create \"foo\"', 'id': 'foo', 'create_id': True }) result = self.get_view_response_json(create_field='name') assert self.expected ==", "**view_kwargs): return json.loads(self.get_view_response(**view_kwargs).content) def test_get(self): result = self.get_view_response_json() assert self.expected == result def", "Select2QuerySetSequenceViewTestCase(test.TestCase): def setUp(self): self.expected = { 'pagination': { 'more': False }, 'results': []", "**view_kwargs): return self.get_view(**view_kwargs).dispatch(self.request) def 
get_view_response_json(self, **view_kwargs): return json.loads(self.get_view_response(**view_kwargs).content) def test_get(self): result = self.get_view_response_json()", "paginate_by=2, **kwargs ) view.request = self.request return view def get_view_response(self, **view_kwargs): return self.get_view(**view_kwargs).dispatch(self.request)", "\"foo\"', 'id': 'foo', 'create_id': True }) result = self.get_view_response_json(create_field='name') assert self.expected == result", "get_view_response(self, **view_kwargs): return self.get_view(**view_kwargs).dispatch(self.request) def get_view_response_json(self, **view_kwargs): return json.loads(self.get_view_response(**view_kwargs).content) def test_get(self): result =" ]
[ "context.settings.active_mapping_file = ( RESOURCE_PATH / \"test_cli\" / \"anon_mapping.csv\" ) # but then raise", "catch_exceptions=False) assert result.exit_code == 1 assert \"Test Exception\" in result.output def test_cli_map_add_folder(mock_map_context_without, folder_with_some_dicom_files):", "command\"\"\" context: MapCommandContext = runner_with_mapping.mock_context context.current_dir = folder_with_mapping_and_some_dicom_files.path monkeypatch.setattr( \"os.getcwd\", lambda: str(folder_with_mapping_and_some_dicom_files.path) )", "fixture from anonapi.cli import entrypoint from anonapi.cli.map_commands import ( MapCommandContext, activate, add_accession_numbers, add_selection,", "): \"\"\"Add all dicom files in this folder to mapping but do not", "context.settings.active_mapping_file.exists() result = runner.invoke(delete, catch_exceptions=False) assert result.exit_code == 0 assert not context.settings.active_mapping_file.exists() #", "# now try to add something from the directory with some dicom files", "\"inputfile\" / \"some_accession_numbers.xlsx\" result = runner_with_mapping.invoke( add_accession_numbers, [\"--input-file\", str(input_file_path)] ) assert result.exit_code ==", "class MappingContextRunner(AnonAPIContextRunner): \"\"\"A click runner that always injects a MapCommandContext instance\"\"\" def __init__(self,", "in this folder to mapping\"\"\" context = mock_map_context_without runner = AnonAPIContextRunner(mock_context=context) selection_folder =", "an existing mapping \"\"\" context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) # assert mapping", "(input file contains 2 folders + names) input_file_path = MAPPER_RESOURCE_PATH / \"inputfile\" /", "MAPPER_RESOURCE_PATH / \"inputfile\" / \"some_accession_numbers.xlsx\" result = runner_with_mapping.invoke( add_accession_numbers, [\"--input-file\", str(input_file_path)] ) assert", "== \"studyB\" def test_cli_map_add_accession_numbers(runner_with_mapping): 
\"\"\"Add some accession numbers to a mapping\"\"\" result =", "runner that always injects a MapCommandContext instance\"\"\" def __init__(self, mock_context: MapCommandContext): super().__init__(mock_context=mock_context) def", "should yield a nice 'no mapping' message \"\"\" result = runner_without_mapping.invoke(status, catch_exceptions=False) assert", "disk assert len(mapping.grid) == 1 added = ParameterSet(mapping.grid.rows[-1]) identifier = added.get_param_by_type(SourceIdentifierParameter) # and", "make sure a valid mapping file is found context = mock_main_runner.get_context() context.settings.active_mapping_file =", "to the current path assert not identifier.path.is_absolute() def test_cli_map_add_folder_no_check( mock_map_context_without, folder_with_some_dicom_files ): \"\"\"Add", "DICOM\" in result.output def test_cli_map_delete(mock_map_context_with_mapping): \"\"\"Running map info should give you a nice", "from disk assert len(mapping.grid) == 1 added = ParameterSet(mapping.grid.rows[-1]) identifier = added.get_param_by_type(SourceIdentifierParameter) #", "should not have been selected yet currently assert not selection_folder.has_file_selection() # but after", "test_cli_map_add_paths_file( mock_map_context_with_mapping, folder_with_some_dicom_files, monkeypatch ): \"\"\"Add an xls file containing several paths and", "are no rows mapping assert len(context.get_current_mapping().grid) == 0 # No selection file has", "= context.get_current_mapping() # reload from disk assert len(mapping.grid) == 1 added = ParameterSet(mapping.grid.rows[-1])", "add_study_folders, args=[str(selection_folder.path)], catch_exceptions=False ) # There should be a selection there assert result.exit_code", "assert mock_main_runner.get_context().settings.active_mapping_file == mapping_path def test_cli_map_info(mock_main_runner_with_mapping): \"\"\"Running map info should give you a", "selection_folder.has_file_selection() # but after adding result = runner.invoke( 
add_study_folders, args=[str(selection_folder.path)], catch_exceptions=False ) #", "we start with a mapping file, but no active # after activating, active", "tmpdir): result = mock_main_runner.invoke(entrypoint.cli, \"map init\", catch_exceptions=False) with open(Path(tmpdir) / \"anon_mapping.csv\", \"r\") as", "NotImplementedError( \"Call settings.active_mapping_file instead\" ) context.settings.active_mapping_file = a_folder_with_mapping / \"anon_mapping.csv\" return mock_main_runner @fixture", "be a selection there assert result.exit_code == 0 assert selection_folder.has_file_selection() assert \"that look", "= runner.invoke(delete, catch_exceptions=False) assert result.exit_code == 0 assert not context.settings.active_mapping_file.exists() # deleting again", "assert not context.settings.active_mapping_file.exists() # deleting again will yield nice message result = runner.invoke(delete)", "like below. assert ( ParameterSet(mapping.rows[-1]).as_dict()[\"source\"].value.identifier == \"12344556.12342345\" ) def test_cli_map_add_accession_numbers_file(runner_with_mapping): \"\"\"Add some accession", "from the input file should have been included pseudo_names = [ParameterSet(x).get_param_by_type(PseudoName) for x", "# Then run with input file input (input file contains 2 folders +", "a_folder_with_mapping / \"anon_mapping.csv\" return mock_main_runner @fixture def mock_map_context_with_mapping(a_folder_with_mapping) -> MapCommandContext: return MapCommandContext( current_dir=a_folder_with_mapping,", "test_cli_map_add_folder(mock_map_context_without, folder_with_some_dicom_files): \"\"\"Add all dicom files in this folder to mapping\"\"\" context =", "start with a mapping file, but no active # after activating, active mapping", "[str(x) for y in mapping.rows for x in y] ) def test_cli_map(mock_main_runner, mock_cli_base_context,", "y in mapping.rows for x in y] ) def test_cli_map(mock_main_runner, mock_cli_base_context, tmpdir): result", "mock_map_context_with_mapping 
runner = AnonAPIContextRunner(mock_context=context) settings = context.settings settings.active_mapping_file = None # we start", "== 0 assert selection_folder.has_file_selection() # also, this selection should have been added to", "pytest import fixture from anonapi.cli import entrypoint from anonapi.cli.map_commands import ( MapCommandContext, activate,", "valid active mapping\"\"\" return MappingContextRunner(mock_context=mock_map_context_with_mapping) @fixture def mock_map_context_without(tmpdir) -> MapCommandContext: return MapCommandContext(current_dir=tmpdir, settings=DefaultAnonClientSettings(),)", "mapping runner.invoke(entrypoint.cli, \"map init\", catch_exceptions=False) mapping_path = mock_main_runner.get_context().current_dir / DEFAULT_MAPPING_NAME assert mapping_path.exists() MappingFile(mapping_path).load_mapping()", "mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) result = runner.invoke(edit, catch_exceptions=False) assert result.exit_code == 0 assert", "mapping file, but no active # after activating, active mapping should be set", "a mapping\"\"\" input_file_path = MAPPER_RESOURCE_PATH / \"inputfile\" / \"some_accession_numbers.xlsx\" result = runner_with_mapping.invoke( add_accession_numbers,", "= [ParameterSet(x).get_param_by_type(PseudoName) for x in added] assert pseudo_names[1].value == \"studyA\" assert pseudo_names[2].value ==", "1 added = ParameterSet(mapping.grid.rows[-1]) identifier = added.get_param_by_type(SourceIdentifierParameter) # and the identifier should be", "): \"\"\"Add an xls file containing several paths and potentially pseudonyms to an", "2 folders + names) input_file_path = MAPPER_RESOURCE_PATH / \"inputfile\" / \"some_folder_names.xlsx\" result =", "def mock_main_runner_with_mapping(mock_main_runner, a_folder_with_mapping): context = mock_main_runner.get_context() context.current_dir = lambda: NotImplementedError( \"Call settings.active_mapping_file instead\"", "mapping\" in 
runner.invoke(entrypoint.cli, \"map activate\", catch_exceptions=False).output ) # but after init there should", "file containing several paths and potentially pseudonyms to an existing mapping \"\"\" context", "create_fileselection_click_recorder, monkeypatch, ): \"\"\"Add multiple study folders using the add-study-folders command\"\"\" context: MapCommandContext", "\"\"\" return MockContextCliRunner( mock_context=MapCommandContext( current_dir=tmpdir, settings=DefaultAnonClientSettings() ) ) def test_cli_map_add_selection( runner_with_mapping, a_folder_with_mapping_and_fileselection ):", "mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) # assert mapping is as expected mapping = context.get_current_mapping()", "context.get_current_mapping().grid.rows[20:] assert len(added) == 3 # and the pseudo names from the input", "Mapping, MappingFile, MappingLoadError, ) from anonapi.parameters import ParameterSet, PseudoName, SourceIdentifierParameter from anonapi.settings import", "but after adding result = runner.invoke( add_study_folders, args=[\"--no-check-dicom\", str(selection_folder.path)], catch_exceptions=False, ) # There", "def __init__(self, mock_context: MapCommandContext): super().__init__(mock_context=mock_context) def get_current_mapping(self) -> Mapping: return self.mock_context.get_current_mapping() @fixture def", "-> MappingContextRunner: \"\"\"A click CLIRunner with MapCommandContext that has a valid active mapping\"\"\"", "that always injects a MapCommandContext instance\"\"\" def __init__(self, mock_context: MapCommandContext): super().__init__(mock_context=mock_context) def get_current_mapping(self)", "[ParameterSet(x).get_param_by_type(PseudoName) for x in added] assert pseudo_names[1].value == \"studyA\" assert pseudo_names[2].value == \"studyB\"", "runner_with_mapping.invoke( add_study_folders, args=[\"--no-check-dicom\", \"*\"], catch_exceptions=False, ) assert create_fileselection_click_recorder.call_count == 2 
assert \"that look", "result.output def test_cli_map_info_load_exception(mock_main_runner, monkeypatch): \"\"\"Running info with a corrupt mapping file should yield", "\"\"\"Add a file selection to a mapping.\"\"\" mapping_folder, fileselection_path = a_folder_with_mapping_and_fileselection runner =", "assert len(mapping) == 21 assert \"fileselection:a_folder/a_file_selection.txt\" in \"\".join( [str(x) for y in mapping.rows", "look like DICOM\" in result.output def test_cli_map_delete(mock_map_context_with_mapping): \"\"\"Running map info should give you", "@fixture def mock_main_runner_with_mapping(mock_main_runner, a_folder_with_mapping): context = mock_main_runner.get_context() context.current_dir = lambda: NotImplementedError( \"Call settings.active_mapping_file", "Exception\" in result.output def test_cli_map_add_folder(mock_map_context_without, folder_with_some_dicom_files): \"\"\"Add all dicom files in this folder", "by default there are no rows mapping assert len(context.get_current_mapping().grid) == 0 # No", "adding result = runner.invoke( add_study_folders, args=[str(selection_folder.path)], catch_exceptions=False ) # There should be a", "MappingFile(mapping_path).load_mapping() # should not crash # and the created mapping should have been", "= RESOURCE_PATH / \"test_cli\" runner = mock_main_runner_with_mapping result = runner.invoke(entrypoint.cli, \"map status\", catch_exceptions=False)", "context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) result = runner.invoke(edit, catch_exceptions=False) assert result.exit_code ==", "result.output assert not mock_launch.called def test_cli_map_activate(mock_map_context_with_mapping): context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) settings", "and the pseudo names from the input file should have been included pseudo_names", "with open(Path(tmpdir) / \"anon_mapping.csv\", \"r\") as f: f.read() assert result.exit_code == 0 def", 
"folder_with_some_dicom_files # Add this folder to mapping result = runner.invoke( add_study_folders, args=[str(selection_folder.path)], catch_exceptions=False,", "\"Could not find mapping file at\" in runner.invoke(activate).output def test_cli_map_add_paths_file( mock_map_context_with_mapping, folder_with_some_dicom_files, monkeypatch", "super().__init__(mock_context=mock_context) def get_current_mapping(self) -> Mapping: return self.mock_context.get_current_mapping() @fixture def mock_main_runner_with_mapping(mock_main_runner, a_folder_with_mapping): context =", "should be a valid mapping runner.invoke(entrypoint.cli, \"map init\", catch_exceptions=False) mapping_path = mock_main_runner.get_context().current_dir /", "\"\"\" runner = mock_main_runner result = runner.invoke(entrypoint.cli, \"map status\", catch_exceptions=False) assert result.exit_code ==", "assert len(context.get_current_mapping().grid) == 0 # dicom files should not have been selected yet", "# and the created mapping should have been activated assert mock_main_runner.get_context().settings.active_mapping_file == mapping_path", "mapping' message \"\"\" result = runner_without_mapping.invoke(status, catch_exceptions=False) assert result.exit_code == 1 assert \"No", "mapping file should yield a nice 'no mapping' message \"\"\" result = runner_without_mapping.invoke(status,", "\"*\"], catch_exceptions=False, ) assert create_fileselection_click_recorder.call_count == 2 assert \"that look like DICOM\" in", "when loading def mock_load(x): raise MappingLoadError(\"Test Exception\") monkeypatch.setattr(\"anonapi.mapper.JobParameterGrid.load\", mock_load) runner = CliRunner() result", "from anonapi.cli.map_commands import ( MapCommandContext, activate, add_accession_numbers, add_selection, delete, edit, find_dicom_files, add_study_folders, init,", "folder_with_some_dicom_files.path) folders = [ x for x in folder_with_some_dicom_files.path.glob(\"*\") if not x.is_file() ]", "you a nice print of 
contents\"\"\" context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) assert", "status, ) from anonapi.mapper import ( DEFAULT_MAPPING_NAME, Mapping, MappingFile, MappingLoadError, ) from anonapi.parameters", "runner.invoke(init) # by default there are no rows assert len(context.get_current_mapping().grid) == 0 #", "mock_main_runner # there should be no mapping to start with assert ( \"Could", "\"No active mapping\" in result.output def test_cli_map_info_no_active_mapping(runner_without_mapping): \"\"\"Running info on a directory not", "click CLIRunner that passes MapCommandContext without active mapping (active mapping is None) \"\"\"", "# after activating, active mapping should be set runner.invoke(activate) assert settings.active_mapping_file == context.current_dir", "after init there should be a valid mapping runner.invoke(entrypoint.cli, \"map init\", catch_exceptions=False) mapping_path", "0 # Then run with input file input (input file contains 2 folders", "result = runner.invoke(delete) assert result.exit_code == 1 assert \"No such file or directory\"", "folders = [ x for x in folder_with_some_dicom_files.path.glob(\"*\") if not x.is_file() ] #", "from anonapi.parameters import ParameterSet, PseudoName, SourceIdentifierParameter from anonapi.settings import DefaultAnonClientSettings from tests.conftest import", "context = mock_main_runner_with_mapping.get_context() context.current_dir = RESOURCE_PATH / \"test_cli\" runner = mock_main_runner_with_mapping result =", "RESOURCE_PATH / \"test_cli\" runner = mock_main_runner_with_mapping result = runner.invoke(entrypoint.cli, \"map status\", catch_exceptions=False) assert", "result.exit_code == 0 # Then run with input file input (input file contains", ") result = runner_with_mapping.invoke( add_study_folders, args=[\"--no-check-dicom\", \"*\"], catch_exceptions=False, ) assert create_fileselection_click_recorder.call_count == 2", "MapCommandContext instance\"\"\" def 
__init__(self, mock_context: MapCommandContext): super().__init__(mock_context=mock_context) def get_current_mapping(self) -> Mapping: return self.mock_context.get_current_mapping()", "Mapping: return self.mock_context.get_current_mapping() @fixture def mock_main_runner_with_mapping(mock_main_runner, a_folder_with_mapping): context = mock_main_runner.get_context() context.current_dir = lambda:", "\"anon_mapping.csv\" return mock_main_runner @fixture def mock_map_context_with_mapping(a_folder_with_mapping) -> MapCommandContext: return MapCommandContext( current_dir=a_folder_with_mapping, settings=DefaultAnonClientSettings( active_mapping_file=a_folder_with_mapping", "AnonAPIContextRunner(mock_context=context) settings = context.settings settings.active_mapping_file = None # we start with a mapping", ") assert result.exit_code == 0 # now three rows should have been added", "around the function that adds paths to mapping. Function will still works as", "**kwargs) return find_dicom_files(*args, **kwargs) monkeypatch.setattr( \"anonapi.cli.map_commands.find_dicom_files\", find_dicom_files_recorded, ) return recorder def test_cli_map_add_study_folders( runner_with_mapping,", "in result.output assert not mock_launch.called def test_cli_map_activate(mock_map_context_with_mapping): context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context)", "== 0 # now three rows should have been added added = context.get_current_mapping().grid.rows[20:]", "== 1 assert \"No active mapping\" in result.output def test_cli_map_info_load_exception(mock_main_runner, monkeypatch): \"\"\"Running info", "\"anon_mapping.csv\" ) # but then raise exception when loading def mock_load(x): raise MappingLoadError(\"Test", "been added to the mapping: mapping = context.get_current_mapping() # reload from disk assert", "mapping file found at\" in result.output assert not mock_launch.called def test_cli_map_activate(mock_map_context_with_mapping): context =", "are no rows 
assert len(context.get_current_mapping().grid) == 0 # dicom files should not have", "ParameterSet(mapping.grid.rows[-1]) identifier = added.get_param_by_type(SourceIdentifierParameter) # and the identifier should be a FileSelectionIdentifier which", "all dicom files in this folder to mapping but do not scan\"\"\" context", "of contents\"\"\" context = mock_main_runner_with_mapping.get_context() context.current_dir = RESOURCE_PATH / \"test_cli\" runner = mock_main_runner_with_mapping", "def test_cli_map_add_folder_no_check( mock_map_context_without, folder_with_some_dicom_files ): \"\"\"Add all dicom files in this folder to", "identifier.path.is_absolute() def test_cli_map_add_folder_no_check( mock_map_context_without, folder_with_some_dicom_files ): \"\"\"Add all dicom files in this folder", "nice 'no mapping' message \"\"\" runner = mock_main_runner result = runner.invoke(entrypoint.cli, \"map status\",", "a valid mapping file is found context = mock_main_runner.get_context() context.settings.active_mapping_file = ( RESOURCE_PATH", "get_current_mapping(self) -> Mapping: return self.mock_context.get_current_mapping() @fixture def mock_main_runner_with_mapping(mock_main_runner, a_folder_with_mapping): context = mock_main_runner.get_context() context.current_dir", "mock_load) runner = CliRunner() result = runner.invoke(entrypoint.cli, \"map status\", catch_exceptions=False) assert result.exit_code ==", "function that adds paths to mapping. 
Function will still works as normal, but", "import Mock from click.testing import CliRunner from pytest import fixture from anonapi.cli import", "\"inputfile\" / \"some_folder_names.xlsx\" result = runner.invoke( add_study_folders, args=[\"-f\", str(input_file_path)], catch_exceptions=False ) assert result.exit_code", "assert result.exit_code == 0 # now three rows should have been added added", "\"Test Exception\" in result.output def test_cli_map_add_folder(mock_map_context_without, folder_with_some_dicom_files): \"\"\"Add all dicom files in this", "selection should have been added to the mapping: mapping = context.get_current_mapping() # reload", "/ \"anon_mapping.csv\", \"r\") as f: f.read() assert result.exit_code == 0 def test_cli_map_init(mock_main_runner, tmpdir):", "to a mapping\"\"\" result = runner_with_mapping.invoke(add_accession_numbers, [\"12344556.12342345\"]) assert result.exit_code == 0 mapping =", "= CliRunner() result = runner.invoke(entrypoint.cli, \"map status\", catch_exceptions=False) assert result.exit_code == 1 assert", "= mock_main_runner_with_mapping.get_context() context.current_dir = RESOURCE_PATH / \"test_cli\" runner = mock_main_runner_with_mapping result = runner.invoke(entrypoint.cli,", "start with assert ( \"Could not find mapping\" in runner.invoke(entrypoint.cli, \"map activate\", catch_exceptions=False).output", "the current path assert not identifier.path.is_absolute() def test_cli_map_add_folder_no_check( mock_map_context_without, folder_with_some_dicom_files ): \"\"\"Add all", "test_cli_map(mock_main_runner, mock_cli_base_context, tmpdir): result = mock_main_runner.invoke(entrypoint.cli, \"map init\", catch_exceptions=False) with open(Path(tmpdir) / \"anon_mapping.csv\",", "nice message result = runner.invoke(delete) assert result.exit_code == 1 assert \"No such file", "assert \"Could not find mapping file at\" in runner.invoke(activate).output def test_cli_map_add_paths_file( mock_map_context_with_mapping, 
folder_with_some_dicom_files,", "len(added) == 3 # and the pseudo names from the input file should", "the add-study-folders command\"\"\" context: MapCommandContext = runner_with_mapping.mock_context context.current_dir = folder_with_mapping_and_some_dicom_files.path monkeypatch.setattr( \"os.getcwd\", lambda:", "mapping (active mapping is None) \"\"\" return MockContextCliRunner( mock_context=MapCommandContext( current_dir=tmpdir, settings=DefaultAnonClientSettings() ) )", "should be set runner.invoke(activate) assert settings.active_mapping_file == context.current_dir / \"anon_mapping.csv\" # Graceful error", "directory not containing a mapping file should yield a nice 'no mapping' message", "info should give you a nice print of contents\"\"\" context = mock_map_context_with_mapping runner", "\"\"\"Add some accession numbers to a mapping\"\"\" result = runner_with_mapping.invoke(add_accession_numbers, [\"12344556.12342345\"]) assert result.exit_code", "folder_with_some_dicom_files, monkeypatch ): \"\"\"Add an xls file containing several paths and potentially pseudonyms", "point assert not selection_folder.has_file_selection() # but after adding result = runner.invoke( add_study_folders, args=[str(selection_folder.path)],", "assert result.exit_code == 0 assert mock_launch.called # now try edit without any mapping", "assert mapping is as expected mapping = context.get_current_mapping() assert len(mapping.grid) == 20 #", "str(input_file_path)], catch_exceptions=False ) assert result.exit_code == 0 # now three rows should have", "the created mapping should have been activated assert mock_main_runner.get_context().settings.active_mapping_file == mapping_path def test_cli_map_info(mock_main_runner_with_mapping):", "do not scan\"\"\" context = mock_map_context_without runner = AnonAPIContextRunner(mock_context=context) selection_folder = folder_with_some_dicom_files runner.invoke(init)", "/ \"inputfile\" / \"some_folder_names.xlsx\" result = runner.invoke( 
add_study_folders, args=[\"-f\", str(input_file_path)], catch_exceptions=False ) assert", "context.get_current_mapping() # reload from disk assert len(mapping.grid) == 1 added = ParameterSet(mapping.grid.rows[-1]) identifier", "= mock_map_context_without runner = AnonAPIContextRunner(mock_context=context) selection_folder = folder_with_some_dicom_files # Add this folder to", "activate, add_accession_numbers, add_selection, delete, edit, find_dicom_files, add_study_folders, init, status, ) from anonapi.mapper import", "/ \"test_cli\" / \"anon_mapping.csv\" ) # but then raise exception when loading def", "yet! assert \"No active mapping\" in result.output # make one runner.invoke(init) # by", "active mapping\" in result.output def test_cli_map_info_no_active_mapping(runner_without_mapping): \"\"\"Running info on a directory not containing", "result = runner.invoke( add_study_folders, args=[\"--no-check-dicom\", str(selection_folder.path)], catch_exceptions=False, ) # There should be a", "\"\"\" context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) # assert mapping is as expected", "selection there assert result.exit_code == 0 assert selection_folder.has_file_selection() assert \"that look like DICOM\"", "mock_cli_base_context, tmpdir): result = mock_main_runner.invoke(entrypoint.cli, \"map init\", catch_exceptions=False) with open(Path(tmpdir) / \"anon_mapping.csv\", \"r\")", "look like DICOM\" in result.output @fixture def create_fileselection_click_recorder(monkeypatch): \"\"\"Add a decorator around the", "have been added to the mapping: mapping = context.get_current_mapping() # reload from disk", "\"anon_mapping.csv\", \"r\") as f: f.read() assert result.exit_code == 0 def test_cli_map_init(mock_main_runner, tmpdir): runner", "then raise exception when loading def mock_load(x): raise MappingLoadError(\"Test Exception\") monkeypatch.setattr(\"anonapi.mapper.JobParameterGrid.load\", mock_load) runner", "No selection file has 
been put in the folder at this point assert", "the pseudo names from the input file should have been included pseudo_names =", "= folder_with_some_dicom_files.path monkeypatch.setattr(\"os.getcwd\", lambda: folder_with_some_dicom_files.path) folders = [ x for x in folder_with_some_dicom_files.path.glob(\"*\")", "be set runner.invoke(activate) assert settings.active_mapping_file == context.current_dir / \"anon_mapping.csv\" # Graceful error when", "monkeypatch.setattr( \"anonapi.cli.map_commands.find_dicom_files\", find_dicom_files_recorded, ) return recorder def test_cli_map_add_study_folders( runner_with_mapping, folder_with_mapping_and_some_dicom_files, create_fileselection_click_recorder, monkeypatch, ):", "run with regular command line input result = runner.invoke( add_study_folders, args=[str(folders[0])], catch_exceptions=False )", "test_cli_map_info_no_active_mapping(runner_without_mapping): \"\"\"Running info on a directory not containing a mapping file should yield", "file should yield a nice message\"\"\" # make sure a valid mapping file", "MapCommandContext(current_dir=tmpdir, settings=DefaultAnonClientSettings(),) @fixture def runner_without_mapping(tmpdir): \"\"\"A click CLIRunner that passes MapCommandContext without active", "mapping to start with assert ( \"Could not find mapping\" in runner.invoke(entrypoint.cli, \"map", "assert mapping_path.exists() MappingFile(mapping_path).load_mapping() # should not crash # and the created mapping should", "add_study_folders, args=[str(selection_folder.path)], catch_exceptions=False, ) # oh no! no mapping yet! 
assert \"No active", "found at\" in result.output assert not mock_launch.called def test_cli_map_activate(mock_map_context_with_mapping): context = mock_map_context_with_mapping runner", "# make sure a valid mapping file is found context = mock_main_runner.get_context() context.settings.active_mapping_file", "[ x for x in folder_with_some_dicom_files.path.glob(\"*\") if not x.is_file() ] # First run", "assert len(mapping.grid) == 20 # now try to add something from the directory", "import ParameterSet, PseudoName, SourceIdentifierParameter from anonapi.settings import DefaultAnonClientSettings from tests.conftest import AnonAPIContextRunner, MockContextCliRunner", "there should be a valid mapping runner.invoke(entrypoint.cli, \"map init\", catch_exceptions=False) mapping_path = mock_main_runner.get_context().current_dir", "a nice 'no mapping' message \"\"\" runner = mock_main_runner result = runner.invoke(entrypoint.cli, \"map", "# oh no! no mapping yet! assert \"No active mapping\" in result.output #", "the original function, but track calls\"\"\" recorder(*args, **kwargs) return find_dicom_files(*args, **kwargs) monkeypatch.setattr( \"anonapi.cli.map_commands.find_dicom_files\",", "file input (input file contains 2 folders + names) input_file_path = MAPPER_RESOURCE_PATH /", "add_accession_numbers, add_selection, delete, edit, find_dicom_files, add_study_folders, init, status, ) from anonapi.mapper import (", "MAPPER_RESOURCE_PATH / \"inputfile\" / \"some_folder_names.xlsx\" result = runner.invoke( add_study_folders, args=[\"-f\", str(input_file_path)], catch_exceptions=False )", "= runner.invoke(edit) assert \"No mapping file found at\" in result.output assert not mock_launch.called", ") @fixture def runner_with_mapping(mock_map_context_with_mapping) -> MappingContextRunner: \"\"\"A click CLIRunner with MapCommandContext that has", "mapping file should yield a nice message\"\"\" # make sure a valid mapping", "MAPPER_RESOURCE_PATH = RESOURCE_PATH / \"test_mapper\" 
class MappingContextRunner(AnonAPIContextRunner): \"\"\"A click runner that always injects", "\"No active mapping\" in result.output # make one runner.invoke(init) # by default there", "anonapi.settings import DefaultAnonClientSettings from tests.conftest import AnonAPIContextRunner, MockContextCliRunner from tests import RESOURCE_PATH MAPPER_RESOURCE_PATH", "selection_folder = folder_with_some_dicom_files runner.invoke(init) # by default there are no rows assert len(context.get_current_mapping().grid)", "result.exit_code == 0 assert selection_folder.has_file_selection() assert \"that look like DICOM\" in result.output @fixture", "in result.output def test_cli_map_edit(mock_map_context_with_mapping, mock_launch): context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) result =", "\"map activate\", catch_exceptions=False).output ) # but after init there should be a valid", "not find mapping file at\" in runner.invoke(activate).output def test_cli_map_add_paths_file( mock_map_context_with_mapping, folder_with_some_dicom_files, monkeypatch ):", "exception when loading def mock_load(x): raise MappingLoadError(\"Test Exception\") monkeypatch.setattr(\"anonapi.mapper.JobParameterGrid.load\", mock_load) runner = CliRunner()", "runner.invoke( add_study_folders, args=[\"--no-check-dicom\", str(selection_folder.path)], catch_exceptions=False, ) # There should be a selection there", "/ \"test_cli\" runner = mock_main_runner_with_mapping result = runner.invoke(entrypoint.cli, \"map status\", catch_exceptions=False) assert result.exit_code", ") assert result.exit_code == 0 mapping = runner_with_mapping.mock_context.get_current_mapping() assert len(mapping) == 21 assert", "with some dicom files context.current_dir = folder_with_some_dicom_files.path monkeypatch.setattr(\"os.getcwd\", lambda: folder_with_some_dicom_files.path) folders = [", "mapping file should yield a nice 'no mapping' message \"\"\" runner = mock_main_runner", "been put in 
the folder at this point assert not selection_folder.has_file_selection() # but", "assert ( ParameterSet(mapping.rows[-1]).as_dict()[\"source\"].value.identifier == \"12344556.12342345\" ) def test_cli_map_add_accession_numbers_file(runner_with_mapping): \"\"\"Add some accession numbers to", "valid mapping runner.invoke(entrypoint.cli, \"map init\", catch_exceptions=False) mapping_path = mock_main_runner.get_context().current_dir / DEFAULT_MAPPING_NAME assert mapping_path.exists()", "mapping\"\"\" return MappingContextRunner(mock_context=mock_map_context_with_mapping) @fixture def mock_map_context_without(tmpdir) -> MapCommandContext: return MapCommandContext(current_dir=tmpdir, settings=DefaultAnonClientSettings(),) @fixture def", "in result.output def test_cli_map_info_no_active_mapping(runner_without_mapping): \"\"\"Running info on a directory not containing a mapping", "info should give you a nice print of contents\"\"\" context = mock_main_runner_with_mapping.get_context() context.current_dir", "raise exception when loading def mock_load(x): raise MappingLoadError(\"Test Exception\") monkeypatch.setattr(\"anonapi.mapper.JobParameterGrid.load\", mock_load) runner =", "# should not crash # and the created mapping should have been activated", "no mapping yet! 
assert \"No active mapping\" in result.output # make one runner.invoke(init)", "result.output # make one runner.invoke(init) # by default there are no rows mapping", "but after adding result = runner.invoke( add_study_folders, args=[str(selection_folder.path)], catch_exceptions=False ) # There should", "1 assert \"No such file or directory\" in result.output def test_cli_map_edit(mock_map_context_with_mapping, mock_launch): context", "no active # after activating, active mapping should be set runner.invoke(activate) assert settings.active_mapping_file", "edit without any mapping being present mock_launch.reset_mock() runner.invoke(delete) result = runner.invoke(edit) assert \"No", "folders + names) input_file_path = MAPPER_RESOURCE_PATH / \"inputfile\" / \"some_folder_names.xlsx\" result = runner.invoke(", "yield a nice message\"\"\" # make sure a valid mapping file is found", "= runner.invoke(entrypoint.cli, \"map status\", catch_exceptions=False) assert result.exit_code == 0 assert \"folder:folder/file4 patientName4\" in", "message \"\"\" runner = mock_main_runner result = runner.invoke(entrypoint.cli, \"map status\", catch_exceptions=False) assert result.exit_code", "context.settings.active_mapping_file.exists() # deleting again will yield nice message result = runner.invoke(delete) assert result.exit_code", "= context.settings settings.active_mapping_file = None # we start with a mapping file, but", "\"\"\"A click CLIRunner that passes MapCommandContext without active mapping (active mapping is None)", "assert \"folder:folder/file4 patientName4\" in result.output def test_cli_map_info_empty_dir(mock_main_runner): \"\"\"Running info on a directory not", "\"\"\"Running info on a directory not containing a mapping file should yield a", "active mapping\"\"\" return MappingContextRunner(mock_context=mock_map_context_with_mapping) @fixture def mock_map_context_without(tmpdir) -> MapCommandContext: return MapCommandContext(current_dir=tmpdir, 
settings=DefaultAnonClientSettings(),) @fixture", "context: MapCommandContext = runner_with_mapping.mock_context context.current_dir = folder_with_mapping_and_some_dicom_files.path monkeypatch.setattr( \"os.getcwd\", lambda: str(folder_with_mapping_and_some_dicom_files.path) ) result", "result.exit_code == 0 mapping = runner_with_mapping.mock_context.get_current_mapping() assert len(mapping) == 21 assert \"fileselection:a_folder/a_file_selection.txt\" in", "have been activated assert mock_main_runner.get_context().settings.active_mapping_file == mapping_path def test_cli_map_info(mock_main_runner_with_mapping): \"\"\"Running map info should", "scan\"\"\" context = mock_map_context_without runner = AnonAPIContextRunner(mock_context=context) selection_folder = folder_with_some_dicom_files runner.invoke(init) # by", "is as expected mapping = context.get_current_mapping() assert len(mapping.grid) == 20 # now try", "== 0 def test_cli_map_init(mock_main_runner, tmpdir): runner = mock_main_runner # there should be no", "line input result = runner.invoke( add_study_folders, args=[str(folders[0])], catch_exceptions=False ) assert result.exit_code == 0", "instead\" ) context.settings.active_mapping_file = a_folder_with_mapping / \"anon_mapping.csv\" return mock_main_runner @fixture def mock_map_context_with_mapping(a_folder_with_mapping) ->", "catch_exceptions=False ) # There should be a selection there assert result.exit_code == 0", "context.current_dir / \"anon_mapping.csv\" # Graceful error when activating when there is no mapping", "Exception\") monkeypatch.setattr(\"anonapi.mapper.JobParameterGrid.load\", mock_load) runner = CliRunner() result = runner.invoke(entrypoint.cli, \"map status\", catch_exceptions=False) assert", "result = runner_with_mapping.invoke( add_study_folders, args=[\"--no-check-dicom\", \"*\"], catch_exceptions=False, ) assert create_fileselection_click_recorder.call_count == 2 assert", "with a corrupt mapping file should yield a nice message\"\"\" # 
make sure", "# dicom files should not have been selected yet currently assert not selection_folder.has_file_selection()", "assert pseudo_names[2].value == \"studyB\" def test_cli_map_add_accession_numbers(runner_with_mapping): \"\"\"Add some accession numbers to a mapping\"\"\"", "mapping should have been activated assert mock_main_runner.get_context().settings.active_mapping_file == mapping_path def test_cli_map_info(mock_main_runner_with_mapping): \"\"\"Running map", "mapping \"\"\" context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) # assert mapping is as", "row easier. Not like below. assert ( ParameterSet(mapping.rows[-1]).as_dict()[\"source\"].value.identifier == \"12344556.12342345\" ) def test_cli_map_add_accession_numbers_file(runner_with_mapping):", "catch_exceptions=False, ) # oh no! no mapping yet! assert \"No active mapping\" in", "mapping = context.get_current_mapping() assert len(mapping.grid) == 20 # now try to add something", "files context.current_dir = folder_with_some_dicom_files.path monkeypatch.setattr(\"os.getcwd\", lambda: folder_with_some_dicom_files.path) folders = [ x for x", "added = context.get_current_mapping().grid.rows[20:] assert len(added) == 3 # and the pseudo names from", "input file input (input file contains 2 folders + names) input_file_path = MAPPER_RESOURCE_PATH", "mock_main_runner.get_context().current_dir / DEFAULT_MAPPING_NAME assert mapping_path.exists() MappingFile(mapping_path).load_mapping() # should not crash # and the", "runner_without_mapping(tmpdir): \"\"\"A click CLIRunner that passes MapCommandContext without active mapping (active mapping is", "at\" in result.output assert not mock_launch.called def test_cli_map_activate(mock_map_context_with_mapping): context = mock_map_context_with_mapping runner =", "folder to mapping result = runner.invoke( add_study_folders, args=[str(selection_folder.path)], catch_exceptions=False, ) # oh no!", "( MapCommandContext, activate, 
add_accession_numbers, add_selection, delete, edit, find_dicom_files, add_study_folders, init, status, ) from", "test_cli_map_info_load_exception(mock_main_runner, monkeypatch): \"\"\"Running info with a corrupt mapping file should yield a nice", "result = runner.invoke(entrypoint.cli, \"map status\", catch_exceptions=False) assert result.exit_code == 0 assert \"folder:folder/file4 patientName4\"", "message result = runner.invoke(delete) assert result.exit_code == 1 assert \"No such file or", "@fixture def runner_with_mapping(mock_map_context_with_mapping) -> MappingContextRunner: \"\"\"A click CLIRunner with MapCommandContext that has a", "put in the folder at this point assert not selection_folder.has_file_selection() # but after", "give you a nice print of contents\"\"\" context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context)", "runner = AnonAPIContextRunner(mock_context=context) settings = context.settings settings.active_mapping_file = None # we start with", "f.read() assert result.exit_code == 0 def test_cli_map_init(mock_main_runner, tmpdir): runner = mock_main_runner # there", "result = runner.invoke(edit) assert \"No mapping file found at\" in result.output assert not", "assert result.exit_code == 0 mapping = runner_with_mapping.mock_context.get_current_mapping() assert len(mapping) == 21 assert \"fileselection:a_folder/a_file_selection.txt\"", "assert \"No active mapping\" in result.output def test_cli_map_info_no_active_mapping(runner_without_mapping): \"\"\"Running info on a directory", "current_dir=tmpdir, settings=DefaultAnonClientSettings() ) ) def test_cli_map_add_selection( runner_with_mapping, a_folder_with_mapping_and_fileselection ): \"\"\"Add a file selection", "MappingContextRunner: \"\"\"A click CLIRunner with MapCommandContext that has a valid active mapping\"\"\" return", "should be a FileSelectionIdentifier which is # relative to the current path assert", "from pathlib import Path from unittest.mock 
import Mock from click.testing import CliRunner from", "mock_launch): context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) result = runner.invoke(edit, catch_exceptions=False) assert result.exit_code", "several paths and potentially pseudonyms to an existing mapping \"\"\" context = mock_map_context_with_mapping", "contents\"\"\" context = mock_main_runner_with_mapping.get_context() context.current_dir = RESOURCE_PATH / \"test_cli\" runner = mock_main_runner_with_mapping result", "been selected yet currently assert not selection_folder.has_file_selection() # but after adding result =", "assert pseudo_names[1].value == \"studyA\" assert pseudo_names[2].value == \"studyB\" def test_cli_map_add_accession_numbers(runner_with_mapping): \"\"\"Add some accession", "def test_cli_map_init(mock_main_runner, tmpdir): runner = mock_main_runner # there should be no mapping to", "find mapping\" in runner.invoke(entrypoint.cli, \"map activate\", catch_exceptions=False).output ) # but after init there", "AnonAPIContextRunner(mock_context=context) selection_folder = folder_with_some_dicom_files runner.invoke(init) # by default there are no rows assert", "identifier should be a FileSelectionIdentifier which is # relative to the current path", "init there should be a valid mapping runner.invoke(entrypoint.cli, \"map init\", catch_exceptions=False) mapping_path =", "== 1 assert \"No such file or directory\" in result.output def test_cli_map_edit(mock_map_context_with_mapping, mock_launch):", "runner.invoke(init) # by default there are no rows mapping assert len(context.get_current_mapping().grid) == 0", "There should be a selection there assert result.exit_code == 0 assert selection_folder.has_file_selection() #", "return find_dicom_files(*args, **kwargs) monkeypatch.setattr( \"anonapi.cli.map_commands.find_dicom_files\", find_dicom_files_recorded, ) return recorder def test_cli_map_add_study_folders( runner_with_mapping, 
folder_with_mapping_and_some_dicom_files,", "assert \"that look like DICOM\" in result.output @fixture def create_fileselection_click_recorder(monkeypatch): \"\"\"Add a decorator", "runner.invoke(delete) assert \"Could not find mapping file at\" in runner.invoke(activate).output def test_cli_map_add_paths_file( mock_map_context_with_mapping,", "result = runner.invoke(entrypoint.cli, \"map status\", catch_exceptions=False) assert result.exit_code == 1 assert \"No active", "not context.settings.active_mapping_file.exists() # deleting again will yield nice message result = runner.invoke(delete) assert", "context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) assert context.settings.active_mapping_file.exists() result = runner.invoke(delete, catch_exceptions=False) assert", "assert mock_launch.called # now try edit without any mapping being present mock_launch.reset_mock() runner.invoke(delete)", "First run with regular command line input result = runner.invoke( add_study_folders, args=[str(folders[0])], catch_exceptions=False", "\"anonapi.cli.map_commands.find_dicom_files\", find_dicom_files_recorded, ) return recorder def test_cli_map_add_study_folders( runner_with_mapping, folder_with_mapping_and_some_dicom_files, create_fileselection_click_recorder, monkeypatch, ): \"\"\"Add", "= runner_with_mapping.get_current_mapping() # TODO: make accessing a specific parameter in a row easier.", "will yield nice message result = runner.invoke(delete) assert result.exit_code == 1 assert \"No", "find_dicom_files_recorded(*args, **kwargs): \"\"\"Run the original function, but track calls\"\"\" recorder(*args, **kwargs) return find_dicom_files(*args,", "runner.invoke(edit, catch_exceptions=False) assert result.exit_code == 0 assert mock_launch.called # now try edit without", "created mapping should have been activated assert mock_main_runner.get_context().settings.active_mapping_file == mapping_path def 
test_cli_map_info(mock_main_runner_with_mapping): \"\"\"Running", "in folder_with_some_dicom_files.path.glob(\"*\") if not x.is_file() ] # First run with regular command line", "potentially pseudonyms to an existing mapping \"\"\" context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context)", "relative to the current path assert not identifier.path.is_absolute() def test_cli_map_add_folder_no_check( mock_map_context_without, folder_with_some_dicom_files ):", "-> Mapping: return self.mock_context.get_current_mapping() @fixture def mock_main_runner_with_mapping(mock_main_runner, a_folder_with_mapping): context = mock_main_runner.get_context() context.current_dir =", "= ( RESOURCE_PATH / \"test_cli\" / \"anon_mapping.csv\" ) # but then raise exception", "assert selection_folder.has_file_selection() assert \"that look like DICOM\" in result.output @fixture def create_fileselection_click_recorder(monkeypatch): \"\"\"Add", "the input file should have been included pseudo_names = [ParameterSet(x).get_param_by_type(PseudoName) for x in", "mock_main_runner_with_mapping(mock_main_runner, a_folder_with_mapping): context = mock_main_runner.get_context() context.current_dir = lambda: NotImplementedError( \"Call settings.active_mapping_file instead\" )", "mapping assert len(context.get_current_mapping().grid) == 0 # No selection file has been put in", "adds paths to mapping. Function will still works as normal, but calls are", "specific parameter in a row easier. Not like below. 
assert ( ParameterSet(mapping.rows[-1]).as_dict()[\"source\"].value.identifier ==", "mapping\"\"\" context = mock_map_context_without runner = AnonAPIContextRunner(mock_context=context) selection_folder = folder_with_some_dicom_files # Add this", "existing mapping \"\"\" context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) # assert mapping is", "to mapping result = runner.invoke( add_study_folders, args=[str(selection_folder.path)], catch_exceptions=False, ) # oh no! no", "not scan\"\"\" context = mock_map_context_without runner = AnonAPIContextRunner(mock_context=context) selection_folder = folder_with_some_dicom_files runner.invoke(init) #", "rows mapping assert len(context.get_current_mapping().grid) == 0 # No selection file has been put", "file at\" in runner.invoke(activate).output def test_cli_map_add_paths_file( mock_map_context_with_mapping, folder_with_some_dicom_files, monkeypatch ): \"\"\"Add an xls", "runner.invoke(delete) result = runner.invoke(edit) assert \"No mapping file found at\" in result.output assert", "return self.mock_context.get_current_mapping() @fixture def mock_main_runner_with_mapping(mock_main_runner, a_folder_with_mapping): context = mock_main_runner.get_context() context.current_dir = lambda: NotImplementedError(", "in result.output def test_cli_map_info_load_exception(mock_main_runner, monkeypatch): \"\"\"Running info with a corrupt mapping file should", "by default there are no rows assert len(context.get_current_mapping().grid) == 0 # dicom files", "mock_context=MapCommandContext( current_dir=tmpdir, settings=DefaultAnonClientSettings() ) ) def test_cli_map_add_selection( runner_with_mapping, a_folder_with_mapping_and_fileselection ): \"\"\"Add a file", "\"some_accession_numbers.xlsx\" result = runner_with_mapping.invoke( add_accession_numbers, [\"--input-file\", str(input_file_path)] ) assert result.exit_code == 0 mapping", "add_study_folders, init, status, ) from anonapi.mapper import ( 
DEFAULT_MAPPING_NAME, Mapping, MappingFile, MappingLoadError, )", "not selection_folder.has_file_selection() # but after adding result = runner.invoke( add_study_folders, args=[str(selection_folder.path)], catch_exceptions=False )", ") return recorder def test_cli_map_add_study_folders( runner_with_mapping, folder_with_mapping_and_some_dicom_files, create_fileselection_click_recorder, monkeypatch, ): \"\"\"Add multiple study", "file contains 2 folders + names) input_file_path = MAPPER_RESOURCE_PATH / \"inputfile\" / \"some_folder_names.xlsx\"", "== 2 assert \"that look like DICOM\" in result.output def test_cli_map_delete(mock_map_context_with_mapping): \"\"\"Running map", "is None) \"\"\" return MockContextCliRunner( mock_context=MapCommandContext( current_dir=tmpdir, settings=DefaultAnonClientSettings() ) ) def test_cli_map_add_selection( runner_with_mapping,", "assert result.exit_code == 0 assert not context.settings.active_mapping_file.exists() # deleting again will yield nice", "now try edit without any mapping being present mock_launch.reset_mock() runner.invoke(delete) result = runner.invoke(edit)", "lambda: str(folder_with_mapping_and_some_dicom_files.path) ) result = runner_with_mapping.invoke( add_study_folders, args=[\"--no-check-dicom\", \"*\"], catch_exceptions=False, ) assert create_fileselection_click_recorder.call_count", "import ( MapCommandContext, activate, add_accession_numbers, add_selection, delete, edit, find_dicom_files, add_study_folders, init, status, )", "a mapping file should yield a nice 'no mapping' message \"\"\" runner =", "context.settings settings.active_mapping_file = None # we start with a mapping file, but no", "\"test_cli\" runner = mock_main_runner_with_mapping result = runner.invoke(entrypoint.cli, \"map status\", catch_exceptions=False) assert result.exit_code ==", "without any mapping being present mock_launch.reset_mock() runner.invoke(delete) result = runner.invoke(edit) assert \"No mapping", "not mock_launch.called def 
test_cli_map_activate(mock_map_context_with_mapping): context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) settings = context.settings", "input result = runner.invoke( add_study_folders, args=[str(folders[0])], catch_exceptions=False ) assert result.exit_code == 0 #", "contains 2 folders + names) input_file_path = MAPPER_RESOURCE_PATH / \"inputfile\" / \"some_folder_names.xlsx\" result", "def runner_without_mapping(tmpdir): \"\"\"A click CLIRunner that passes MapCommandContext without active mapping (active mapping", "def mock_map_context_with_mapping(a_folder_with_mapping) -> MapCommandContext: return MapCommandContext( current_dir=a_folder_with_mapping, settings=DefaultAnonClientSettings( active_mapping_file=a_folder_with_mapping / \"anon_mapping.csv\" ), )", "a nice message\"\"\" # make sure a valid mapping file is found context", "def test_cli_map_edit(mock_map_context_with_mapping, mock_launch): context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) result = runner.invoke(edit, catch_exceptions=False)", "\"\"\" recorder = Mock() def find_dicom_files_recorded(*args, **kwargs): \"\"\"Run the original function, but track", "FileSelectionIdentifier which is # relative to the current path assert not identifier.path.is_absolute() def", "monkeypatch.setattr(\"os.getcwd\", lambda: folder_with_some_dicom_files.path) folders = [ x for x in folder_with_some_dicom_files.path.glob(\"*\") if not", "): \"\"\"Add a file selection to a mapping.\"\"\" mapping_folder, fileselection_path = a_folder_with_mapping_and_fileselection runner", "mapping_path def test_cli_map_info(mock_main_runner_with_mapping): \"\"\"Running map info should give you a nice print of", "\"anon_mapping.csv\" ), ) @fixture def runner_with_mapping(mock_map_context_with_mapping) -> MappingContextRunner: \"\"\"A click CLIRunner with MapCommandContext", "Then run with input file input (input file contains 2 folders + names)", "Mock from 
click.testing import CliRunner from pytest import fixture from anonapi.cli import entrypoint", "mapping should be set runner.invoke(activate) assert settings.active_mapping_file == context.current_dir / \"anon_mapping.csv\" # Graceful", "def test_cli_map_add_study_folders( runner_with_mapping, folder_with_mapping_and_some_dicom_files, create_fileselection_click_recorder, monkeypatch, ): \"\"\"Add multiple study folders using the", "/ \"anon_mapping.csv\" return mock_main_runner @fixture def mock_map_context_with_mapping(a_folder_with_mapping) -> MapCommandContext: return MapCommandContext( current_dir=a_folder_with_mapping, settings=DefaultAnonClientSettings(", "\"\"\"A click CLIRunner with MapCommandContext that has a valid active mapping\"\"\" return MappingContextRunner(mock_context=mock_map_context_with_mapping)", "input (input file contains 2 folders + names) input_file_path = MAPPER_RESOURCE_PATH / \"inputfile\"", "= runner_with_mapping.invoke( add_study_folders, args=[\"--no-check-dicom\", \"*\"], catch_exceptions=False, ) assert create_fileselection_click_recorder.call_count == 2 assert \"that", "in result.output def test_cli_map_delete(mock_map_context_with_mapping): \"\"\"Running map info should give you a nice print", "self.mock_context.get_current_mapping() @fixture def mock_main_runner_with_mapping(mock_main_runner, a_folder_with_mapping): context = mock_main_runner.get_context() context.current_dir = lambda: NotImplementedError( \"Call", "args=[str(folders[0])], catch_exceptions=False ) assert result.exit_code == 0 # Then run with input file", "some accession numbers to a mapping\"\"\" result = runner_with_mapping.invoke(add_accession_numbers, [\"12344556.12342345\"]) assert result.exit_code ==", "multiple study folders using the add-study-folders command\"\"\" context: MapCommandContext = runner_with_mapping.mock_context context.current_dir =", "runner_with_mapping.invoke(add_accession_numbers, [\"12344556.12342345\"]) assert result.exit_code == 0 
mapping = runner_with_mapping.get_current_mapping() # TODO: make accessing", "MapCommandContext without active mapping (active mapping is None) \"\"\" return MockContextCliRunner( mock_context=MapCommandContext( current_dir=tmpdir,", "in \"\".join( [str(x) for y in mapping.rows for x in y] ) def", "to an existing mapping \"\"\" context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) # assert", "MappingContextRunner(mock_context=mock_map_context_with_mapping) @fixture def mock_map_context_without(tmpdir) -> MapCommandContext: return MapCommandContext(current_dir=tmpdir, settings=DefaultAnonClientSettings(),) @fixture def runner_without_mapping(tmpdir): \"\"\"A", "def test_cli_map_add_folder(mock_map_context_without, folder_with_some_dicom_files): \"\"\"Add all dicom files in this folder to mapping\"\"\" context", "catch_exceptions=False ) assert result.exit_code == 0 mapping = runner_with_mapping.mock_context.get_current_mapping() assert len(mapping) == 21", "mapping in current dir runner.invoke(delete) assert \"Could not find mapping file at\" in", "MapCommandContext that has a valid active mapping\"\"\" return MappingContextRunner(mock_context=mock_map_context_with_mapping) @fixture def mock_map_context_without(tmpdir) ->", "mock_map_context_without, folder_with_some_dicom_files ): \"\"\"Add all dicom files in this folder to mapping but", "raise MappingLoadError(\"Test Exception\") monkeypatch.setattr(\"anonapi.mapper.JobParameterGrid.load\", mock_load) runner = CliRunner() result = runner.invoke(entrypoint.cli, \"map status\",", "def test_cli_map_add_accession_numbers_file(runner_with_mapping): \"\"\"Add some accession numbers to a mapping\"\"\" input_file_path = MAPPER_RESOURCE_PATH /", "args=[\"-f\", str(input_file_path)], catch_exceptions=False ) assert result.exit_code == 0 # now three rows should", "mapping_folder, fileselection_path = a_folder_with_mapping_and_fileselection runner = runner_with_mapping result = 
runner.invoke( add_selection, str(fileselection_path), catch_exceptions=False", "== 0 assert mock_launch.called # now try edit without any mapping being present", "/ DEFAULT_MAPPING_NAME assert mapping_path.exists() MappingFile(mapping_path).load_mapping() # should not crash # and the created", "= runner_with_mapping.mock_context.get_current_mapping() assert len(mapping) == 21 assert \"fileselection:a_folder/a_file_selection.txt\" in \"\".join( [str(x) for y", "edit, find_dicom_files, add_study_folders, init, status, ) from anonapi.mapper import ( DEFAULT_MAPPING_NAME, Mapping, MappingFile,", "map info should give you a nice print of contents\"\"\" context = mock_map_context_with_mapping", "catch_exceptions=False, ) assert create_fileselection_click_recorder.call_count == 2 assert \"that look like DICOM\" in result.output", "with a mapping file, but no active # after activating, active mapping should", "yield nice message result = runner.invoke(delete) assert result.exit_code == 1 assert \"No such", "from tests import RESOURCE_PATH MAPPER_RESOURCE_PATH = RESOURCE_PATH / \"test_mapper\" class MappingContextRunner(AnonAPIContextRunner): \"\"\"A click", "runner = AnonAPIContextRunner(mock_context=context) selection_folder = folder_with_some_dicom_files # Add this folder to mapping result", ") def test_cli_map_add_accession_numbers_file(runner_with_mapping): \"\"\"Add some accession numbers to a mapping\"\"\" input_file_path = MAPPER_RESOURCE_PATH", "1 assert \"No active mapping\" in result.output def test_cli_map_info_no_active_mapping(runner_without_mapping): \"\"\"Running info on a", "and the identifier should be a FileSelectionIdentifier which is # relative to the", "context.current_dir = folder_with_mapping_and_some_dicom_files.path monkeypatch.setattr( \"os.getcwd\", lambda: str(folder_with_mapping_and_some_dicom_files.path) ) result = runner_with_mapping.invoke( add_study_folders, args=[\"--no-check-dicom\",", "active mapping (active mapping is None) \"\"\" 
return MockContextCliRunner( mock_context=MapCommandContext( current_dir=tmpdir, settings=DefaultAnonClientSettings() )", "\"Could not find mapping\" in runner.invoke(entrypoint.cli, \"map activate\", catch_exceptions=False).output ) # but after", "assert result.exit_code == 0 assert \"folder:folder/file4 patientName4\" in result.output def test_cli_map_info_empty_dir(mock_main_runner): \"\"\"Running info", "mapping result = runner.invoke( add_study_folders, args=[str(selection_folder.path)], catch_exceptions=False, ) # oh no! no mapping", "result.exit_code == 1 assert \"No active mapping\" in result.output def test_cli_map_info_load_exception(mock_main_runner, monkeypatch): \"\"\"Running", "or directory\" in result.output def test_cli_map_edit(mock_map_context_with_mapping, mock_launch): context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context)", "but after init there should be a valid mapping runner.invoke(entrypoint.cli, \"map init\", catch_exceptions=False)", "folder to mapping but do not scan\"\"\" context = mock_map_context_without runner = AnonAPIContextRunner(mock_context=context)", "pseudonyms to an existing mapping \"\"\" context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) #", "0 # now three rows should have been added added = context.get_current_mapping().grid.rows[20:] assert", "normal, but calls are recorded \"\"\" recorder = Mock() def find_dicom_files_recorded(*args, **kwargs): \"\"\"Run", "click CLIRunner with MapCommandContext that has a valid active mapping\"\"\" return MappingContextRunner(mock_context=mock_map_context_with_mapping) @fixture", "dicom files should not have been selected yet currently assert not selection_folder.has_file_selection() #", "def test_cli_map_info(mock_main_runner_with_mapping): \"\"\"Running map info should give you a nice print of contents\"\"\"", "'no mapping' message \"\"\" runner = mock_main_runner result = runner.invoke(entrypoint.cli, \"map 
status\", catch_exceptions=False)", "calls\"\"\" recorder(*args, **kwargs) return find_dicom_files(*args, **kwargs) monkeypatch.setattr( \"anonapi.cli.map_commands.find_dicom_files\", find_dicom_files_recorded, ) return recorder def", "monkeypatch, ): \"\"\"Add multiple study folders using the add-study-folders command\"\"\" context: MapCommandContext =", "yield a nice 'no mapping' message \"\"\" runner = mock_main_runner result = runner.invoke(entrypoint.cli,", "import RESOURCE_PATH MAPPER_RESOURCE_PATH = RESOURCE_PATH / \"test_mapper\" class MappingContextRunner(AnonAPIContextRunner): \"\"\"A click runner that", "= a_folder_with_mapping_and_fileselection runner = runner_with_mapping result = runner.invoke( add_selection, str(fileselection_path), catch_exceptions=False ) assert", "result = runner.invoke( add_study_folders, args=[str(selection_folder.path)], catch_exceptions=False, ) # oh no! no mapping yet!", "folder_with_some_dicom_files runner.invoke(init) # by default there are no rows assert len(context.get_current_mapping().grid) == 0", "= AnonAPIContextRunner(mock_context=context) selection_folder = folder_with_some_dicom_files runner.invoke(init) # by default there are no rows", "selection_folder.has_file_selection() # also, this selection should have been added to the mapping: mapping", "folder_with_some_dicom_files): \"\"\"Add all dicom files in this folder to mapping\"\"\" context = mock_map_context_without", "def test_cli_map_delete(mock_map_context_with_mapping): \"\"\"Running map info should give you a nice print of contents\"\"\"", "== 0 mapping = runner_with_mapping.mock_context.get_current_mapping() assert len(mapping) == 21 assert \"fileselection:a_folder/a_file_selection.txt\" in \"\".join(", "a mapping file, but no active # after activating, active mapping should be", "folder to mapping\"\"\" context = mock_map_context_without runner = AnonAPIContextRunner(mock_context=context) selection_folder = folder_with_some_dicom_files #", "= 
mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) result = runner.invoke(edit, catch_exceptions=False) assert result.exit_code == 0", "a corrupt mapping file should yield a nice message\"\"\" # make sure a", "assert result.exit_code == 1 assert \"Test Exception\" in result.output def test_cli_map_add_folder(mock_map_context_without, folder_with_some_dicom_files): \"\"\"Add", "a mapping file should yield a nice 'no mapping' message \"\"\" result =", "assert \"No such file or directory\" in result.output def test_cli_map_edit(mock_map_context_with_mapping, mock_launch): context =", "result.exit_code == 0 assert selection_folder.has_file_selection() # also, this selection should have been added", "in runner.invoke(activate).output def test_cli_map_add_paths_file( mock_map_context_with_mapping, folder_with_some_dicom_files, monkeypatch ): \"\"\"Add an xls file containing", "dicom files context.current_dir = folder_with_some_dicom_files.path monkeypatch.setattr(\"os.getcwd\", lambda: folder_with_some_dicom_files.path) folders = [ x for", "assert result.exit_code == 0 def test_cli_map_init(mock_main_runner, tmpdir): runner = mock_main_runner # there should", "= mock_main_runner.get_context().current_dir / DEFAULT_MAPPING_NAME assert mapping_path.exists() MappingFile(mapping_path).load_mapping() # should not crash # and", "settings.active_mapping_file instead\" ) context.settings.active_mapping_file = a_folder_with_mapping / \"anon_mapping.csv\" return mock_main_runner @fixture def mock_map_context_with_mapping(a_folder_with_mapping)", "that has a valid active mapping\"\"\" return MappingContextRunner(mock_context=mock_map_context_with_mapping) @fixture def mock_map_context_without(tmpdir) -> MapCommandContext:", "f: f.read() assert result.exit_code == 0 def test_cli_map_init(mock_main_runner, tmpdir): runner = mock_main_runner #", "patientName4\" in result.output def test_cli_map_info_empty_dir(mock_main_runner): \"\"\"Running info on a 
directory not containing a", "DEFAULT_MAPPING_NAME, Mapping, MappingFile, MappingLoadError, ) from anonapi.parameters import ParameterSet, PseudoName, SourceIdentifierParameter from anonapi.settings", "runner = CliRunner() result = runner.invoke(entrypoint.cli, \"map status\", catch_exceptions=False) assert result.exit_code == 1", "sure a valid mapping file is found context = mock_main_runner.get_context() context.settings.active_mapping_file = (", "0 assert not context.settings.active_mapping_file.exists() # deleting again will yield nice message result =", "being present mock_launch.reset_mock() runner.invoke(delete) result = runner.invoke(edit) assert \"No mapping file found at\"", "test_cli_map_edit(mock_map_context_with_mapping, mock_launch): context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) result = runner.invoke(edit, catch_exceptions=False) assert", "mock_map_context_with_mapping(a_folder_with_mapping) -> MapCommandContext: return MapCommandContext( current_dir=a_folder_with_mapping, settings=DefaultAnonClientSettings( active_mapping_file=a_folder_with_mapping / \"anon_mapping.csv\" ), ) @fixture", "settings.active_mapping_file = None # we start with a mapping file, but no active", "there assert result.exit_code == 0 assert selection_folder.has_file_selection() assert \"that look like DICOM\" in", "fileselection_path = a_folder_with_mapping_and_fileselection runner = runner_with_mapping result = runner.invoke( add_selection, str(fileselection_path), catch_exceptions=False )", "= runner_with_mapping.mock_context context.current_dir = folder_with_mapping_and_some_dicom_files.path monkeypatch.setattr( \"os.getcwd\", lambda: str(folder_with_mapping_and_some_dicom_files.path) ) result = runner_with_mapping.invoke(", ") from anonapi.mapper import ( DEFAULT_MAPPING_NAME, Mapping, MappingFile, MappingLoadError, ) from anonapi.parameters import", "default there are no rows mapping assert 
len(context.get_current_mapping().grid) == 0 # No selection", "runner.invoke(entrypoint.cli, \"map init\", catch_exceptions=False) mapping_path = mock_main_runner.get_context().current_dir / DEFAULT_MAPPING_NAME assert mapping_path.exists() MappingFile(mapping_path).load_mapping() #", "# we start with a mapping file, but no active # after activating,", "a directory not containing a mapping file should yield a nice 'no mapping'", "names from the input file should have been included pseudo_names = [ParameterSet(x).get_param_by_type(PseudoName) for", "containing a mapping file should yield a nice 'no mapping' message \"\"\" result", "assert result.exit_code == 0 mapping = runner_with_mapping.get_current_mapping() assert ( ParameterSet(mapping.rows[-1]).as_dict()[\"source\"].value.identifier == \"123456.12321313\" )", "result = runner.invoke(entrypoint.cli, \"map status\", catch_exceptions=False) assert result.exit_code == 1 assert \"Test Exception\"", "should yield a nice 'no mapping' message \"\"\" runner = mock_main_runner result =", "CLIRunner that passes MapCommandContext without active mapping (active mapping is None) \"\"\" return", "result.output def test_cli_map_add_folder(mock_map_context_without, folder_with_some_dicom_files): \"\"\"Add all dicom files in this folder to mapping\"\"\"", "result = mock_main_runner.invoke(entrypoint.cli, \"map init\", catch_exceptions=False) with open(Path(tmpdir) / \"anon_mapping.csv\", \"r\") as f:", "containing several paths and potentially pseudonyms to an existing mapping \"\"\" context =", "str(selection_folder.path)], catch_exceptions=False, ) # There should be a selection there assert result.exit_code ==", "file is found context = mock_main_runner.get_context() context.settings.active_mapping_file = ( RESOURCE_PATH / \"test_cli\" /", "from anonapi.cli import entrypoint from anonapi.cli.map_commands import ( MapCommandContext, activate, add_accession_numbers, add_selection, delete,", "Path from unittest.mock import Mock 
from click.testing import CliRunner from pytest import fixture", "a nice print of contents\"\"\" context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) assert context.settings.active_mapping_file.exists()", "@fixture def runner_without_mapping(tmpdir): \"\"\"A click CLIRunner that passes MapCommandContext without active mapping (active", "result.output def test_cli_map_info_no_active_mapping(runner_without_mapping): \"\"\"Running info on a directory not containing a mapping file", "as expected mapping = context.get_current_mapping() assert len(mapping.grid) == 20 # now try to", "== 21 assert \"fileselection:a_folder/a_file_selection.txt\" in \"\".join( [str(x) for y in mapping.rows for x", "oh no! no mapping yet! assert \"No active mapping\" in result.output # make", "x.is_file() ] # First run with regular command line input result = runner.invoke(", "names) input_file_path = MAPPER_RESOURCE_PATH / \"inputfile\" / \"some_folder_names.xlsx\" result = runner.invoke( add_study_folders, args=[\"-f\",", "= mock_main_runner_with_mapping result = runner.invoke(entrypoint.cli, \"map status\", catch_exceptions=False) assert result.exit_code == 0 assert", "21 assert \"fileselection:a_folder/a_file_selection.txt\" in \"\".join( [str(x) for y in mapping.rows for x in", "runner.invoke(edit) assert \"No mapping file found at\" in result.output assert not mock_launch.called def", "in runner.invoke(entrypoint.cli, \"map activate\", catch_exceptions=False).output ) # but after init there should be", "-> MapCommandContext: return MapCommandContext( current_dir=a_folder_with_mapping, settings=DefaultAnonClientSettings( active_mapping_file=a_folder_with_mapping / \"anon_mapping.csv\" ), ) @fixture def", "and potentially pseudonyms to an existing mapping \"\"\" context = mock_map_context_with_mapping runner =", "error when activating when there is no mapping in current dir runner.invoke(delete) assert", "a valid active mapping\"\"\" return 
MappingContextRunner(mock_context=mock_map_context_with_mapping) @fixture def mock_map_context_without(tmpdir) -> MapCommandContext: return MapCommandContext(current_dir=tmpdir,", "in a row easier. Not like below. assert ( ParameterSet(mapping.rows[-1]).as_dict()[\"source\"].value.identifier == \"12344556.12342345\" )", "should have been activated assert mock_main_runner.get_context().settings.active_mapping_file == mapping_path def test_cli_map_info(mock_main_runner_with_mapping): \"\"\"Running map info", "runner.invoke(entrypoint.cli, \"map status\", catch_exceptions=False) assert result.exit_code == 1 assert \"Test Exception\" in result.output", "= mock_main_runner.invoke(entrypoint.cli, \"map init\", catch_exceptions=False) with open(Path(tmpdir) / \"anon_mapping.csv\", \"r\") as f: f.read()", "# make one runner.invoke(init) # by default there are no rows mapping assert", "there are no rows assert len(context.get_current_mapping().grid) == 0 # dicom files should not", "len(context.get_current_mapping().grid) == 0 # dicom files should not have been selected yet currently", "study folders using the add-study-folders command\"\"\" context: MapCommandContext = runner_with_mapping.mock_context context.current_dir = folder_with_mapping_and_some_dicom_files.path", "/ \"anon_mapping.csv\" # Graceful error when activating when there is no mapping in", "\"that look like DICOM\" in result.output @fixture def create_fileselection_click_recorder(monkeypatch): \"\"\"Add a decorator around", "mock_main_runner.invoke(entrypoint.cli, \"map init\", catch_exceptions=False) with open(Path(tmpdir) / \"anon_mapping.csv\", \"r\") as f: f.read() assert", "calls are recorded \"\"\" recorder = Mock() def find_dicom_files_recorded(*args, **kwargs): \"\"\"Run the original", "find_dicom_files(*args, **kwargs) monkeypatch.setattr( \"anonapi.cli.map_commands.find_dicom_files\", find_dicom_files_recorded, ) return recorder def test_cli_map_add_study_folders( runner_with_mapping, 
folder_with_mapping_and_some_dicom_files, create_fileselection_click_recorder,", "), ) @fixture def runner_with_mapping(mock_map_context_with_mapping) -> MappingContextRunner: \"\"\"A click CLIRunner with MapCommandContext that", "result = runner.invoke( add_selection, str(fileselection_path), catch_exceptions=False ) assert result.exit_code == 0 mapping =", "no rows assert len(context.get_current_mapping().grid) == 0 # dicom files should not have been", "run with input file input (input file contains 2 folders + names) input_file_path", "runner_with_mapping.get_current_mapping() # TODO: make accessing a specific parameter in a row easier. Not", "without active mapping (active mapping is None) \"\"\" return MockContextCliRunner( mock_context=MapCommandContext( current_dir=tmpdir, settings=DefaultAnonClientSettings()", "a MapCommandContext instance\"\"\" def __init__(self, mock_context: MapCommandContext): super().__init__(mock_context=mock_context) def get_current_mapping(self) -> Mapping: return", "current path assert not identifier.path.is_absolute() def test_cli_map_add_folder_no_check( mock_map_context_without, folder_with_some_dicom_files ): \"\"\"Add all dicom", "str(folder_with_mapping_and_some_dicom_files.path) ) result = runner_with_mapping.invoke( add_study_folders, args=[\"--no-check-dicom\", \"*\"], catch_exceptions=False, ) assert create_fileselection_click_recorder.call_count ==", "mapping = runner_with_mapping.get_current_mapping() assert ( ParameterSet(mapping.rows[-1]).as_dict()[\"source\"].value.identifier == \"123456.12321313\" ) assert ParameterSet(mapping.rows[-1]).as_dict()[\"pseudo_name\"].value == \"study3\"", "result = runner.invoke( add_study_folders, args=[str(selection_folder.path)], catch_exceptions=False ) # There should be a selection", "/ \"anon_mapping.csv\" ), ) @fixture def runner_with_mapping(mock_map_context_with_mapping) -> MappingContextRunner: \"\"\"A click CLIRunner with", "a_folder_with_mapping_and_fileselection runner = 
runner_with_mapping result = runner.invoke( add_selection, str(fileselection_path), catch_exceptions=False ) assert result.exit_code", "AnonAPIContextRunner(mock_context=context) selection_folder = folder_with_some_dicom_files # Add this folder to mapping result = runner.invoke(", "assert len(context.get_current_mapping().grid) == 0 # No selection file has been put in the", "decorator around the function that adds paths to mapping. Function will still works", "assert selection_folder.has_file_selection() # also, this selection should have been added to the mapping:", "args=[\"--no-check-dicom\", str(selection_folder.path)], catch_exceptions=False, ) # There should be a selection there assert result.exit_code", "input_file_path = MAPPER_RESOURCE_PATH / \"inputfile\" / \"some_accession_numbers.xlsx\" result = runner_with_mapping.invoke( add_accession_numbers, [\"--input-file\", str(input_file_path)]", "monkeypatch.setattr( \"os.getcwd\", lambda: str(folder_with_mapping_and_some_dicom_files.path) ) result = runner_with_mapping.invoke( add_study_folders, args=[\"--no-check-dicom\", \"*\"], catch_exceptions=False, )", "MapCommandContext: return MapCommandContext(current_dir=tmpdir, settings=DefaultAnonClientSettings(),) @fixture def runner_without_mapping(tmpdir): \"\"\"A click CLIRunner that passes MapCommandContext", "# there should be no mapping to start with assert ( \"Could not", "mapping but do not scan\"\"\" context = mock_map_context_without runner = AnonAPIContextRunner(mock_context=context) selection_folder =", "be no mapping to start with assert ( \"Could not find mapping\" in", "RESOURCE_PATH / \"test_cli\" / \"anon_mapping.csv\" ) # but then raise exception when loading", "activate\", catch_exceptions=False).output ) # but after init there should be a valid mapping", "@fixture def create_fileselection_click_recorder(monkeypatch): \"\"\"Add a decorator around the function that adds paths to", "AnonAPIContextRunner(mock_context=context) assert 
context.settings.active_mapping_file.exists() result = runner.invoke(delete, catch_exceptions=False) assert result.exit_code == 0 assert not", "active # after activating, active mapping should be set runner.invoke(activate) assert settings.active_mapping_file ==", "should have been added added = context.get_current_mapping().grid.rows[20:] assert len(added) == 3 # and", "but then raise exception when loading def mock_load(x): raise MappingLoadError(\"Test Exception\") monkeypatch.setattr(\"anonapi.mapper.JobParameterGrid.load\", mock_load)", "should yield a nice message\"\"\" # make sure a valid mapping file is", "@fixture def mock_map_context_without(tmpdir) -> MapCommandContext: return MapCommandContext(current_dir=tmpdir, settings=DefaultAnonClientSettings(),) @fixture def runner_without_mapping(tmpdir): \"\"\"A click", "# by default there are no rows assert len(context.get_current_mapping().grid) == 0 # dicom", "some dicom files context.current_dir = folder_with_some_dicom_files.path monkeypatch.setattr(\"os.getcwd\", lambda: folder_with_some_dicom_files.path) folders = [ x", "there should be no mapping to start with assert ( \"Could not find", "assert settings.active_mapping_file == context.current_dir / \"anon_mapping.csv\" # Graceful error when activating when there", "if not x.is_file() ] # First run with regular command line input result", "mapping being present mock_launch.reset_mock() runner.invoke(delete) result = runner.invoke(edit) assert \"No mapping file found", "corrupt mapping file should yield a nice message\"\"\" # make sure a valid", "such file or directory\" in result.output def test_cli_map_edit(mock_map_context_with_mapping, mock_launch): context = mock_map_context_with_mapping runner", "return MapCommandContext( current_dir=a_folder_with_mapping, settings=DefaultAnonClientSettings( active_mapping_file=a_folder_with_mapping / \"anon_mapping.csv\" ), ) @fixture def runner_with_mapping(mock_map_context_with_mapping) ->", 
"mock_main_runner_with_mapping result = runner.invoke(entrypoint.cli, \"map status\", catch_exceptions=False) assert result.exit_code == 0 assert \"folder:folder/file4", "MapCommandContext( current_dir=a_folder_with_mapping, settings=DefaultAnonClientSettings( active_mapping_file=a_folder_with_mapping / \"anon_mapping.csv\" ), ) @fixture def runner_with_mapping(mock_map_context_with_mapping) -> MappingContextRunner:", "# assert mapping is as expected mapping = context.get_current_mapping() assert len(mapping.grid) == 20", "no mapping in current dir runner.invoke(delete) assert \"Could not find mapping file at\"", "ParameterSet(mapping.rows[-1]).as_dict()[\"source\"].value.identifier == \"12344556.12342345\" ) def test_cli_map_add_accession_numbers_file(runner_with_mapping): \"\"\"Add some accession numbers to a mapping\"\"\"", "mock_launch.called # now try edit without any mapping being present mock_launch.reset_mock() runner.invoke(delete) result", "path assert not identifier.path.is_absolute() def test_cli_map_add_folder_no_check( mock_map_context_without, folder_with_some_dicom_files ): \"\"\"Add all dicom files", "directory\" in result.output def test_cli_map_edit(mock_map_context_with_mapping, mock_launch): context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) result", "= ParameterSet(mapping.grid.rows[-1]) identifier = added.get_param_by_type(SourceIdentifierParameter) # and the identifier should be a FileSelectionIdentifier", "\"\"\"Run the original function, but track calls\"\"\" recorder(*args, **kwargs) return find_dicom_files(*args, **kwargs) monkeypatch.setattr(", "folders using the add-study-folders command\"\"\" context: MapCommandContext = runner_with_mapping.mock_context context.current_dir = folder_with_mapping_and_some_dicom_files.path monkeypatch.setattr(", "assert not mock_launch.called def test_cli_map_activate(mock_map_context_with_mapping): context = mock_map_context_with_mapping runner = 
AnonAPIContextRunner(mock_context=context) settings =", "runner_with_mapping.invoke( add_accession_numbers, [\"--input-file\", str(input_file_path)] ) assert result.exit_code == 0 mapping = runner_with_mapping.get_current_mapping() assert", "def test_cli_map_add_paths_file( mock_map_context_with_mapping, folder_with_some_dicom_files, monkeypatch ): \"\"\"Add an xls file containing several paths", "def create_fileselection_click_recorder(monkeypatch): \"\"\"Add a decorator around the function that adds paths to mapping.", "find_dicom_files_recorded, ) return recorder def test_cli_map_add_study_folders( runner_with_mapping, folder_with_mapping_and_some_dicom_files, create_fileselection_click_recorder, monkeypatch, ): \"\"\"Add multiple", "make accessing a specific parameter in a row easier. Not like below. assert", "def test_cli_map_add_selection( runner_with_mapping, a_folder_with_mapping_and_fileselection ): \"\"\"Add a file selection to a mapping.\"\"\" mapping_folder,", "mock_main_runner result = runner.invoke(entrypoint.cli, \"map status\", catch_exceptions=False) assert result.exit_code == 1 assert \"No", "test_cli_map_add_selection( runner_with_mapping, a_folder_with_mapping_and_fileselection ): \"\"\"Add a file selection to a mapping.\"\"\" mapping_folder, fileselection_path", "mock_main_runner_with_mapping.get_context() context.current_dir = RESOURCE_PATH / \"test_cli\" runner = mock_main_runner_with_mapping result = runner.invoke(entrypoint.cli, \"map", "valid mapping file is found context = mock_main_runner.get_context() context.settings.active_mapping_file = ( RESOURCE_PATH /", "file has been put in the folder at this point assert not selection_folder.has_file_selection()", "AnonAPIContextRunner(mock_context=context) result = runner.invoke(edit, catch_exceptions=False) assert result.exit_code == 0 assert mock_launch.called # now", "accession numbers to a mapping\"\"\" input_file_path = MAPPER_RESOURCE_PATH / \"inputfile\" / \"some_accession_numbers.xlsx\" 
result", "monkeypatch): \"\"\"Running info with a corrupt mapping file should yield a nice message\"\"\"", "\"that look like DICOM\" in result.output def test_cli_map_delete(mock_map_context_with_mapping): \"\"\"Running map info should give", "assert \"No active mapping\" in result.output def test_cli_map_info_load_exception(mock_main_runner, monkeypatch): \"\"\"Running info with a", "DEFAULT_MAPPING_NAME assert mapping_path.exists() MappingFile(mapping_path).load_mapping() # should not crash # and the created mapping", "= runner.invoke( add_study_folders, args=[\"--no-check-dicom\", str(selection_folder.path)], catch_exceptions=False, ) # There should be a selection", "a specific parameter in a row easier. Not like below. assert ( ParameterSet(mapping.rows[-1]).as_dict()[\"source\"].value.identifier", "active mapping\" in result.output # make one runner.invoke(init) # by default there are", "should not crash # and the created mapping should have been activated assert", "= runner_without_mapping.invoke(status, catch_exceptions=False) assert result.exit_code == 1 assert \"No active mapping\" in result.output", "result = runner.invoke(delete, catch_exceptions=False) assert result.exit_code == 0 assert not context.settings.active_mapping_file.exists() # deleting", "import AnonAPIContextRunner, MockContextCliRunner from tests import RESOURCE_PATH MAPPER_RESOURCE_PATH = RESOURCE_PATH / \"test_mapper\" class", "import CliRunner from pytest import fixture from anonapi.cli import entrypoint from anonapi.cli.map_commands import", "crash # and the created mapping should have been activated assert mock_main_runner.get_context().settings.active_mapping_file ==", "\"map status\", catch_exceptions=False) assert result.exit_code == 1 assert \"Test Exception\" in result.output def", "result = runner.invoke( add_study_folders, args=[str(folders[0])], catch_exceptions=False ) assert result.exit_code == 0 # Then", ") assert result.exit_code == 0 # Then run with input file input 
(input", "\"studyA\" assert pseudo_names[2].value == \"studyB\" def test_cli_map_add_accession_numbers(runner_with_mapping): \"\"\"Add some accession numbers to a", "\"map status\", catch_exceptions=False) assert result.exit_code == 1 assert \"No active mapping\" in result.output", "= added.get_param_by_type(SourceIdentifierParameter) # and the identifier should be a FileSelectionIdentifier which is #", "after activating, active mapping should be set runner.invoke(activate) assert settings.active_mapping_file == context.current_dir /", "= runner_with_mapping.invoke( add_accession_numbers, [\"--input-file\", str(input_file_path)] ) assert result.exit_code == 0 mapping = runner_with_mapping.get_current_mapping()", "add_accession_numbers, [\"--input-file\", str(input_file_path)] ) assert result.exit_code == 0 mapping = runner_with_mapping.get_current_mapping() assert (", "runner = mock_main_runner_with_mapping result = runner.invoke(entrypoint.cli, \"map status\", catch_exceptions=False) assert result.exit_code == 0", "nice print of contents\"\"\" context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) assert context.settings.active_mapping_file.exists() result", "not identifier.path.is_absolute() def test_cli_map_add_folder_no_check( mock_map_context_without, folder_with_some_dicom_files ): \"\"\"Add all dicom files in this", "== 0 # Then run with input file input (input file contains 2", "= runner_with_mapping result = runner.invoke( add_selection, str(fileselection_path), catch_exceptions=False ) assert result.exit_code == 0", "runner.invoke( add_selection, str(fileselection_path), catch_exceptions=False ) assert result.exit_code == 0 mapping = runner_with_mapping.mock_context.get_current_mapping() assert", "context.current_dir = lambda: NotImplementedError( \"Call settings.active_mapping_file instead\" ) context.settings.active_mapping_file = a_folder_with_mapping / \"anon_mapping.csv\"", "0 assert 
selection_folder.has_file_selection() # also, this selection should have been added to the", "test_cli_map_info_empty_dir(mock_main_runner): \"\"\"Running info on a directory not containing a mapping file should yield", "this folder to mapping result = runner.invoke( add_study_folders, args=[str(selection_folder.path)], catch_exceptions=False, ) # oh", "an xls file containing several paths and potentially pseudonyms to an existing mapping", "len(mapping) == 21 assert \"fileselection:a_folder/a_file_selection.txt\" in \"\".join( [str(x) for y in mapping.rows for", "pathlib import Path from unittest.mock import Mock from click.testing import CliRunner from pytest", "0 # No selection file has been put in the folder at this", "are recorded \"\"\" recorder = Mock() def find_dicom_files_recorded(*args, **kwargs): \"\"\"Run the original function,", "for y in mapping.rows for x in y] ) def test_cli_map(mock_main_runner, mock_cli_base_context, tmpdir):", "in result.output def test_cli_map_info_empty_dir(mock_main_runner): \"\"\"Running info on a directory not containing a mapping", "assert not selection_folder.has_file_selection() # but after adding result = runner.invoke( add_study_folders, args=[str(selection_folder.path)], catch_exceptions=False", "print of contents\"\"\" context = mock_main_runner_with_mapping.get_context() context.current_dir = RESOURCE_PATH / \"test_cli\" runner =", "selection_folder = folder_with_some_dicom_files # Add this folder to mapping result = runner.invoke( add_study_folders,", "test_cli_map_add_folder_no_check( mock_map_context_without, folder_with_some_dicom_files ): \"\"\"Add all dicom files in this folder to mapping", "that adds paths to mapping. 
Function will still works as normal, but calls", "this folder to mapping\"\"\" context = mock_map_context_without runner = AnonAPIContextRunner(mock_context=context) selection_folder = folder_with_some_dicom_files", "= AnonAPIContextRunner(mock_context=context) assert context.settings.active_mapping_file.exists() result = runner.invoke(delete, catch_exceptions=False) assert result.exit_code == 0 assert", "should be a selection there assert result.exit_code == 0 assert selection_folder.has_file_selection() # also,", "\"\"\"Add a decorator around the function that adds paths to mapping. Function will", "test_cli_map_add_accession_numbers_file(runner_with_mapping): \"\"\"Add some accession numbers to a mapping\"\"\" input_file_path = MAPPER_RESOURCE_PATH / \"inputfile\"", "x for x in folder_with_some_dicom_files.path.glob(\"*\") if not x.is_file() ] # First run with", "import Path from unittest.mock import Mock from click.testing import CliRunner from pytest import", "in y] ) def test_cli_map(mock_main_runner, mock_cli_base_context, tmpdir): result = mock_main_runner.invoke(entrypoint.cli, \"map init\", catch_exceptions=False)", "Function will still works as normal, but calls are recorded \"\"\" recorder =", "\"No mapping file found at\" in result.output assert not mock_launch.called def test_cli_map_activate(mock_map_context_with_mapping): context", "mapping.\"\"\" mapping_folder, fileselection_path = a_folder_with_mapping_and_fileselection runner = runner_with_mapping result = runner.invoke( add_selection, str(fileselection_path),", "== 0 assert selection_folder.has_file_selection() assert \"that look like DICOM\" in result.output @fixture def", "assert len(added) == 3 # and the pseudo names from the input file", "init\", catch_exceptions=False) mapping_path = mock_main_runner.get_context().current_dir / DEFAULT_MAPPING_NAME assert mapping_path.exists() MappingFile(mapping_path).load_mapping() # should not", "info on a directory not containing a mapping file should 
yield a nice", "= RESOURCE_PATH / \"test_mapper\" class MappingContextRunner(AnonAPIContextRunner): \"\"\"A click runner that always injects a", ") ) def test_cli_map_add_selection( runner_with_mapping, a_folder_with_mapping_and_fileselection ): \"\"\"Add a file selection to a", "result.exit_code == 0 # now three rows should have been added added =", "instance\"\"\" def __init__(self, mock_context: MapCommandContext): super().__init__(mock_context=mock_context) def get_current_mapping(self) -> Mapping: return self.mock_context.get_current_mapping() @fixture", "-> MapCommandContext: return MapCommandContext(current_dir=tmpdir, settings=DefaultAnonClientSettings(),) @fixture def runner_without_mapping(tmpdir): \"\"\"A click CLIRunner that passes", "runner.invoke(delete, catch_exceptions=False) assert result.exit_code == 0 assert not context.settings.active_mapping_file.exists() # deleting again will", "current dir runner.invoke(delete) assert \"Could not find mapping file at\" in runner.invoke(activate).output def", "runner = AnonAPIContextRunner(mock_context=context) assert context.settings.active_mapping_file.exists() result = runner.invoke(delete, catch_exceptions=False) assert result.exit_code == 0", "== 20 # now try to add something from the directory with some", "mapping is None) \"\"\" return MockContextCliRunner( mock_context=MapCommandContext( current_dir=tmpdir, settings=DefaultAnonClientSettings() ) ) def test_cli_map_add_selection(", "pseudo_names[1].value == \"studyA\" assert pseudo_names[2].value == \"studyB\" def test_cli_map_add_accession_numbers(runner_with_mapping): \"\"\"Add some accession numbers", "0 def test_cli_map_init(mock_main_runner, tmpdir): runner = mock_main_runner # there should be no mapping", "init\", catch_exceptions=False) with open(Path(tmpdir) / \"anon_mapping.csv\", \"r\") as f: f.read() assert result.exit_code ==", "runner.invoke(delete) assert result.exit_code == 1 assert \"No such file or directory\" in result.output", "== 0 
assert not context.settings.active_mapping_file.exists() # deleting again will yield nice message result", "== mapping_path def test_cli_map_info(mock_main_runner_with_mapping): \"\"\"Running map info should give you a nice print", "\"some_folder_names.xlsx\" result = runner.invoke( add_study_folders, args=[\"-f\", str(input_file_path)], catch_exceptions=False ) assert result.exit_code == 0", "\"test_cli\" / \"anon_mapping.csv\" ) # but then raise exception when loading def mock_load(x):", "selection to a mapping.\"\"\" mapping_folder, fileselection_path = a_folder_with_mapping_and_fileselection runner = runner_with_mapping result =", "assert \"Test Exception\" in result.output def test_cli_map_add_folder(mock_map_context_without, folder_with_some_dicom_files): \"\"\"Add all dicom files in", "MockContextCliRunner( mock_context=MapCommandContext( current_dir=tmpdir, settings=DefaultAnonClientSettings() ) ) def test_cli_map_add_selection( runner_with_mapping, a_folder_with_mapping_and_fileselection ): \"\"\"Add a", "in this folder to mapping but do not scan\"\"\" context = mock_map_context_without runner", "return MapCommandContext(current_dir=tmpdir, settings=DefaultAnonClientSettings(),) @fixture def runner_without_mapping(tmpdir): \"\"\"A click CLIRunner that passes MapCommandContext without", "a selection there assert result.exit_code == 0 assert selection_folder.has_file_selection() # also, this selection", "runner_with_mapping.mock_context context.current_dir = folder_with_mapping_and_some_dicom_files.path monkeypatch.setattr( \"os.getcwd\", lambda: str(folder_with_mapping_and_some_dicom_files.path) ) result = runner_with_mapping.invoke( add_study_folders,", "len(mapping.grid) == 1 added = ParameterSet(mapping.grid.rows[-1]) identifier = added.get_param_by_type(SourceIdentifierParameter) # and the identifier", "this point assert not selection_folder.has_file_selection() # but after adding result = runner.invoke( add_study_folders,", "file, but no active # after 
activating, active mapping should be set runner.invoke(activate)", "dicom files in this folder to mapping but do not scan\"\"\" context =", "the mapping: mapping = context.get_current_mapping() # reload from disk assert len(mapping.grid) == 1", "any mapping being present mock_launch.reset_mock() runner.invoke(delete) result = runner.invoke(edit) assert \"No mapping file", "\"studyB\" def test_cli_map_add_accession_numbers(runner_with_mapping): \"\"\"Add some accession numbers to a mapping\"\"\" result = runner_with_mapping.invoke(add_accession_numbers,", "mapping = runner_with_mapping.mock_context.get_current_mapping() assert len(mapping) == 21 assert \"fileselection:a_folder/a_file_selection.txt\" in \"\".join( [str(x) for", "result = runner.invoke( add_study_folders, args=[\"-f\", str(input_file_path)], catch_exceptions=False ) assert result.exit_code == 0 #", "to a mapping\"\"\" input_file_path = MAPPER_RESOURCE_PATH / \"inputfile\" / \"some_accession_numbers.xlsx\" result = runner_with_mapping.invoke(", "nice message\"\"\" # make sure a valid mapping file is found context =", "will still works as normal, but calls are recorded \"\"\" recorder = Mock()", "tmpdir): runner = mock_main_runner # there should be no mapping to start with", "= runner.invoke( add_study_folders, args=[str(selection_folder.path)], catch_exceptions=False ) # There should be a selection there", "return MappingContextRunner(mock_context=mock_map_context_with_mapping) @fixture def mock_map_context_without(tmpdir) -> MapCommandContext: return MapCommandContext(current_dir=tmpdir, settings=DefaultAnonClientSettings(),) @fixture def runner_without_mapping(tmpdir):", "result.exit_code == 1 assert \"No such file or directory\" in result.output def test_cli_map_edit(mock_map_context_with_mapping,", "easier. Not like below. 
assert ( ParameterSet(mapping.rows[-1]).as_dict()[\"source\"].value.identifier == \"12344556.12342345\" ) def test_cli_map_add_accession_numbers_file(runner_with_mapping): \"\"\"Add", "input file should have been included pseudo_names = [ParameterSet(x).get_param_by_type(PseudoName) for x in added]", "# but then raise exception when loading def mock_load(x): raise MappingLoadError(\"Test Exception\") monkeypatch.setattr(\"anonapi.mapper.JobParameterGrid.load\",", "# Add this folder to mapping result = runner.invoke( add_study_folders, args=[str(selection_folder.path)], catch_exceptions=False, )", "result.exit_code == 0 mapping = runner_with_mapping.get_current_mapping() assert ( ParameterSet(mapping.rows[-1]).as_dict()[\"source\"].value.identifier == \"123456.12321313\" ) assert", "\"map init\", catch_exceptions=False) mapping_path = mock_main_runner.get_context().current_dir / DEFAULT_MAPPING_NAME assert mapping_path.exists() MappingFile(mapping_path).load_mapping() # should", "result.exit_code == 0 assert not context.settings.active_mapping_file.exists() # deleting again will yield nice message", "runner = runner_with_mapping result = runner.invoke( add_selection, str(fileselection_path), catch_exceptions=False ) assert result.exit_code ==", "Graceful error when activating when there is no mapping in current dir runner.invoke(delete)", "not find mapping\" in runner.invoke(entrypoint.cli, \"map activate\", catch_exceptions=False).output ) # but after init", "a mapping\"\"\" result = runner_with_mapping.invoke(add_accession_numbers, [\"12344556.12342345\"]) assert result.exit_code == 0 mapping = runner_with_mapping.get_current_mapping()", "/ \"inputfile\" / \"some_accession_numbers.xlsx\" result = runner_with_mapping.invoke( add_accession_numbers, [\"--input-file\", str(input_file_path)] ) assert result.exit_code", "runner.invoke(activate) assert settings.active_mapping_file == context.current_dir / \"anon_mapping.csv\" # Graceful error when activating when", 
"ParameterSet, PseudoName, SourceIdentifierParameter from anonapi.settings import DefaultAnonClientSettings from tests.conftest import AnonAPIContextRunner, MockContextCliRunner from", "\"\"\"Add an xls file containing several paths and potentially pseudonyms to an existing", "args=[\"--no-check-dicom\", \"*\"], catch_exceptions=False, ) assert create_fileselection_click_recorder.call_count == 2 assert \"that look like DICOM\"", "catch_exceptions=False) assert result.exit_code == 0 assert mock_launch.called # now try edit without any", "selection file has been put in the folder at this point assert not", "# but after adding result = runner.invoke( add_study_folders, args=[\"--no-check-dicom\", str(selection_folder.path)], catch_exceptions=False, ) #", "# now three rows should have been added added = context.get_current_mapping().grid.rows[20:] assert len(added)", "in added] assert pseudo_names[1].value == \"studyA\" assert pseudo_names[2].value == \"studyB\" def test_cli_map_add_accession_numbers(runner_with_mapping): \"\"\"Add", "folder_with_mapping_and_some_dicom_files.path monkeypatch.setattr( \"os.getcwd\", lambda: str(folder_with_mapping_and_some_dicom_files.path) ) result = runner_with_mapping.invoke( add_study_folders, args=[\"--no-check-dicom\", \"*\"], catch_exceptions=False,", "runner_without_mapping.invoke(status, catch_exceptions=False) assert result.exit_code == 1 assert \"No active mapping\" in result.output def", "track calls\"\"\" recorder(*args, **kwargs) return find_dicom_files(*args, **kwargs) monkeypatch.setattr( \"anonapi.cli.map_commands.find_dicom_files\", find_dicom_files_recorded, ) return recorder", "like DICOM\" in result.output def test_cli_map_delete(mock_map_context_with_mapping): \"\"\"Running map info should give you a", "a file selection to a mapping.\"\"\" mapping_folder, fileselection_path = a_folder_with_mapping_and_fileselection runner = runner_with_mapping", "== 3 # and the pseudo names from the input file should have", 
"recorded \"\"\" recorder = Mock() def find_dicom_files_recorded(*args, **kwargs): \"\"\"Run the original function, but", "def test_cli_map_activate(mock_map_context_with_mapping): context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) settings = context.settings settings.active_mapping_file =", "runner = mock_main_runner result = runner.invoke(entrypoint.cli, \"map status\", catch_exceptions=False) assert result.exit_code == 1", "status\", catch_exceptions=False) assert result.exit_code == 1 assert \"Test Exception\" in result.output def test_cli_map_add_folder(mock_map_context_without,", "( \"Could not find mapping\" in runner.invoke(entrypoint.cli, \"map activate\", catch_exceptions=False).output ) # but", "mock_map_context_with_mapping, folder_with_some_dicom_files, monkeypatch ): \"\"\"Add an xls file containing several paths and potentially", "context.current_dir = folder_with_some_dicom_files.path monkeypatch.setattr(\"os.getcwd\", lambda: folder_with_some_dicom_files.path) folders = [ x for x in", "lambda: folder_with_some_dicom_files.path) folders = [ x for x in folder_with_some_dicom_files.path.glob(\"*\") if not x.is_file()", "def test_cli_map_info_empty_dir(mock_main_runner): \"\"\"Running info on a directory not containing a mapping file should", "'no mapping' message \"\"\" result = runner_without_mapping.invoke(status, catch_exceptions=False) assert result.exit_code == 1 assert", "in mapping.rows for x in y] ) def test_cli_map(mock_main_runner, mock_cli_base_context, tmpdir): result =", "the folder at this point assert not selection_folder.has_file_selection() # but after adding result", "currently assert not selection_folder.has_file_selection() # but after adding result = runner.invoke( add_study_folders, args=[\"--no-check-dicom\",", ") assert result.exit_code == 0 mapping = runner_with_mapping.get_current_mapping() assert ( ParameterSet(mapping.rows[-1]).as_dict()[\"source\"].value.identifier == 
\"123456.12321313\"", "from click.testing import CliRunner from pytest import fixture from anonapi.cli import entrypoint from", "= runner.invoke(entrypoint.cli, \"map status\", catch_exceptions=False) assert result.exit_code == 1 assert \"No active mapping\"", "when there is no mapping in current dir runner.invoke(delete) assert \"Could not find", ") # but after init there should be a valid mapping runner.invoke(entrypoint.cli, \"map", "settings=DefaultAnonClientSettings() ) ) def test_cli_map_add_selection( runner_with_mapping, a_folder_with_mapping_and_fileselection ): \"\"\"Add a file selection to", "result.output @fixture def create_fileselection_click_recorder(monkeypatch): \"\"\"Add a decorator around the function that adds paths", "assert ( \"Could not find mapping\" in runner.invoke(entrypoint.cli, \"map activate\", catch_exceptions=False).output ) #", "= runner.invoke( add_selection, str(fileselection_path), catch_exceptions=False ) assert result.exit_code == 0 mapping = runner_with_mapping.mock_context.get_current_mapping()", "== 0 assert \"folder:folder/file4 patientName4\" in result.output def test_cli_map_info_empty_dir(mock_main_runner): \"\"\"Running info on a", "active_mapping_file=a_folder_with_mapping / \"anon_mapping.csv\" ), ) @fixture def runner_with_mapping(mock_map_context_with_mapping) -> MappingContextRunner: \"\"\"A click CLIRunner", "add_study_folders, args=[str(folders[0])], catch_exceptions=False ) assert result.exit_code == 0 # Then run with input", "assert \"that look like DICOM\" in result.output def test_cli_map_delete(mock_map_context_with_mapping): \"\"\"Running map info should", "runner.invoke( add_study_folders, args=[str(selection_folder.path)], catch_exceptions=False, ) # oh no! no mapping yet! 
assert \"No", "should be a selection there assert result.exit_code == 0 assert selection_folder.has_file_selection() assert \"that", "= mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) # assert mapping is as expected mapping =", "anonapi.parameters import ParameterSet, PseudoName, SourceIdentifierParameter from anonapi.settings import DefaultAnonClientSettings from tests.conftest import AnonAPIContextRunner,", "from unittest.mock import Mock from click.testing import CliRunner from pytest import fixture from", "has been put in the folder at this point assert not selection_folder.has_file_selection() #", "also, this selection should have been added to the mapping: mapping = context.get_current_mapping()", "runner.invoke( add_study_folders, args=[\"-f\", str(input_file_path)], catch_exceptions=False ) assert result.exit_code == 0 # now three", "\"\"\"Running map info should give you a nice print of contents\"\"\" context =", "mapping = context.get_current_mapping() # reload from disk assert len(mapping.grid) == 1 added =", "this selection should have been added to the mapping: mapping = context.get_current_mapping() #", "result.output def test_cli_map_edit(mock_map_context_with_mapping, mock_launch): context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) result = runner.invoke(edit,", "active mapping\" in result.output def test_cli_map_info_load_exception(mock_main_runner, monkeypatch): \"\"\"Running info with a corrupt mapping", "# First run with regular command line input result = runner.invoke( add_study_folders, args=[str(folders[0])],", "click runner that always injects a MapCommandContext instance\"\"\" def __init__(self, mock_context: MapCommandContext): super().__init__(mock_context=mock_context)", "no mapping to start with assert ( \"Could not find mapping\" in runner.invoke(entrypoint.cli,", "when activating when there is no mapping in current dir runner.invoke(delete) assert \"Could", 
"pseudo names from the input file should have been included pseudo_names = [ParameterSet(x).get_param_by_type(PseudoName)", "result.exit_code == 0 def test_cli_map_init(mock_main_runner, tmpdir): runner = mock_main_runner # there should be", "\"map status\", catch_exceptions=False) assert result.exit_code == 0 assert \"folder:folder/file4 patientName4\" in result.output def", "been included pseudo_names = [ParameterSet(x).get_param_by_type(PseudoName) for x in added] assert pseudo_names[1].value == \"studyA\"", "0 mapping = runner_with_mapping.get_current_mapping() assert ( ParameterSet(mapping.rows[-1]).as_dict()[\"source\"].value.identifier == \"123456.12321313\" ) assert ParameterSet(mapping.rows[-1]).as_dict()[\"pseudo_name\"].value ==", "mapping\" in result.output def test_cli_map_info_no_active_mapping(runner_without_mapping): \"\"\"Running info on a directory not containing a", "click.testing import CliRunner from pytest import fixture from anonapi.cli import entrypoint from anonapi.cli.map_commands", "current_dir=a_folder_with_mapping, settings=DefaultAnonClientSettings( active_mapping_file=a_folder_with_mapping / \"anon_mapping.csv\" ), ) @fixture def runner_with_mapping(mock_map_context_with_mapping) -> MappingContextRunner: \"\"\"A", "assert result.exit_code == 0 assert selection_folder.has_file_selection() # also, this selection should have been", "file should have been included pseudo_names = [ParameterSet(x).get_param_by_type(PseudoName) for x in added] assert", "result.exit_code == 0 assert mock_launch.called # now try edit without any mapping being", "Not like below. 
assert ( ParameterSet(mapping.rows[-1]).as_dict()[\"source\"].value.identifier == \"12344556.12342345\" ) def test_cli_map_add_accession_numbers_file(runner_with_mapping): \"\"\"Add some", "args=[str(selection_folder.path)], catch_exceptions=False ) # There should be a selection there assert result.exit_code ==", "DICOM\" in result.output @fixture def create_fileselection_click_recorder(monkeypatch): \"\"\"Add a decorator around the function that", "add_study_folders, args=[\"--no-check-dicom\", \"*\"], catch_exceptions=False, ) assert create_fileselection_click_recorder.call_count == 2 assert \"that look like", "MapCommandContext, activate, add_accession_numbers, add_selection, delete, edit, find_dicom_files, add_study_folders, init, status, ) from anonapi.mapper", "works as normal, but calls are recorded \"\"\" recorder = Mock() def find_dicom_files_recorded(*args,", "after adding result = runner.invoke( add_study_folders, args=[str(selection_folder.path)], catch_exceptions=False ) # There should be", "should give you a nice print of contents\"\"\" context = mock_main_runner_with_mapping.get_context() context.current_dir =", "== 0 mapping = runner_with_mapping.get_current_mapping() assert ( ParameterSet(mapping.rows[-1]).as_dict()[\"source\"].value.identifier == \"123456.12321313\" ) assert ParameterSet(mapping.rows[-1]).as_dict()[\"pseudo_name\"].value", "context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) # assert mapping is as expected mapping", "entrypoint from anonapi.cli.map_commands import ( MapCommandContext, activate, add_accession_numbers, add_selection, delete, edit, find_dicom_files, add_study_folders,", "context.current_dir = RESOURCE_PATH / \"test_cli\" runner = mock_main_runner_with_mapping result = runner.invoke(entrypoint.cli, \"map status\",", "Add this folder to mapping result = runner.invoke( add_study_folders, args=[str(selection_folder.path)], catch_exceptions=False, ) #", "/ \"test_mapper\" class 
MappingContextRunner(AnonAPIContextRunner): \"\"\"A click runner that always injects a MapCommandContext instance\"\"\"", "the function that adds paths to mapping. Function will still works as normal,", "context.get_current_mapping() assert len(mapping.grid) == 20 # now try to add something from the", "directory with some dicom files context.current_dir = folder_with_some_dicom_files.path monkeypatch.setattr(\"os.getcwd\", lambda: folder_with_some_dicom_files.path) folders =", "tests.conftest import AnonAPIContextRunner, MockContextCliRunner from tests import RESOURCE_PATH MAPPER_RESOURCE_PATH = RESOURCE_PATH / \"test_mapper\"", "pseudo_names[2].value == \"studyB\" def test_cli_map_add_accession_numbers(runner_with_mapping): \"\"\"Add some accession numbers to a mapping\"\"\" result", "a mapping.\"\"\" mapping_folder, fileselection_path = a_folder_with_mapping_and_fileselection runner = runner_with_mapping result = runner.invoke( add_selection,", "mapping file is found context = mock_main_runner.get_context() context.settings.active_mapping_file = ( RESOURCE_PATH / \"test_cli\"", "( DEFAULT_MAPPING_NAME, Mapping, MappingFile, MappingLoadError, ) from anonapi.parameters import ParameterSet, PseudoName, SourceIdentifierParameter from", "== 1 assert \"No active mapping\" in result.output def test_cli_map_info_no_active_mapping(runner_without_mapping): \"\"\"Running info on", "with MapCommandContext that has a valid active mapping\"\"\" return MappingContextRunner(mock_context=mock_map_context_with_mapping) @fixture def mock_map_context_without(tmpdir)", "\"Call settings.active_mapping_file instead\" ) context.settings.active_mapping_file = a_folder_with_mapping / \"anon_mapping.csv\" return mock_main_runner @fixture def", "should be no mapping to start with assert ( \"Could not find mapping\"", "init, status, ) from anonapi.mapper import ( DEFAULT_MAPPING_NAME, Mapping, MappingFile, MappingLoadError, ) from", "__init__(self, mock_context: MapCommandContext): 
super().__init__(mock_context=mock_context) def get_current_mapping(self) -> Mapping: return self.mock_context.get_current_mapping() @fixture def mock_main_runner_with_mapping(mock_main_runner,", "containing a mapping file should yield a nice 'no mapping' message \"\"\" runner", "runner_with_mapping, a_folder_with_mapping_and_fileselection ): \"\"\"Add a file selection to a mapping.\"\"\" mapping_folder, fileselection_path =", "on a directory not containing a mapping file should yield a nice 'no", "not containing a mapping file should yield a nice 'no mapping' message \"\"\"", "\"\"\"Running info with a corrupt mapping file should yield a nice message\"\"\" #", "mock_load(x): raise MappingLoadError(\"Test Exception\") monkeypatch.setattr(\"anonapi.mapper.JobParameterGrid.load\", mock_load) runner = CliRunner() result = runner.invoke(entrypoint.cli, \"map", "mapping: mapping = context.get_current_mapping() # reload from disk assert len(mapping.grid) == 1 added", "# relative to the current path assert not identifier.path.is_absolute() def test_cli_map_add_folder_no_check( mock_map_context_without, folder_with_some_dicom_files", "1 assert \"Test Exception\" in result.output def test_cli_map_add_folder(mock_map_context_without, folder_with_some_dicom_files): \"\"\"Add all dicom files", "file or directory\" in result.output def test_cli_map_edit(mock_map_context_with_mapping, mock_launch): context = mock_map_context_with_mapping runner =", ") assert create_fileselection_click_recorder.call_count == 2 assert \"that look like DICOM\" in result.output def", "mapping is as expected mapping = context.get_current_mapping() assert len(mapping.grid) == 20 # now", "present mock_launch.reset_mock() runner.invoke(delete) result = runner.invoke(edit) assert \"No mapping file found at\" in", "== context.current_dir / \"anon_mapping.csv\" # Graceful error when activating when there is no", "runner.invoke( add_study_folders, args=[str(selection_folder.path)], catch_exceptions=False ) 
# There should be a selection there assert", "the identifier should be a FileSelectionIdentifier which is # relative to the current", "# also, this selection should have been added to the mapping: mapping =", "selection_folder.has_file_selection() assert \"that look like DICOM\" in result.output @fixture def create_fileselection_click_recorder(monkeypatch): \"\"\"Add a", "assert \"No mapping file found at\" in result.output assert not mock_launch.called def test_cli_map_activate(mock_map_context_with_mapping):", "/ \"some_accession_numbers.xlsx\" result = runner_with_mapping.invoke( add_accession_numbers, [\"--input-file\", str(input_file_path)] ) assert result.exit_code == 0", "context = mock_main_runner.get_context() context.settings.active_mapping_file = ( RESOURCE_PATH / \"test_cli\" / \"anon_mapping.csv\" ) #", "have been included pseudo_names = [ParameterSet(x).get_param_by_type(PseudoName) for x in added] assert pseudo_names[1].value ==", "at\" in runner.invoke(activate).output def test_cli_map_add_paths_file( mock_map_context_with_mapping, folder_with_some_dicom_files, monkeypatch ): \"\"\"Add an xls file", "from tests.conftest import AnonAPIContextRunner, MockContextCliRunner from tests import RESOURCE_PATH MAPPER_RESOURCE_PATH = RESOURCE_PATH /", "len(context.get_current_mapping().grid) == 0 # No selection file has been put in the folder", "MappingLoadError, ) from anonapi.parameters import ParameterSet, PseudoName, SourceIdentifierParameter from anonapi.settings import DefaultAnonClientSettings from", "original function, but track calls\"\"\" recorder(*args, **kwargs) return find_dicom_files(*args, **kwargs) monkeypatch.setattr( \"anonapi.cli.map_commands.find_dicom_files\", find_dicom_files_recorded,", "\"test_mapper\" class MappingContextRunner(AnonAPIContextRunner): \"\"\"A click runner that always injects a MapCommandContext instance\"\"\" def", "to mapping\"\"\" context = mock_map_context_without runner = AnonAPIContextRunner(mock_context=context) 
selection_folder = folder_with_some_dicom_files # Add", "mapping = runner_with_mapping.get_current_mapping() # TODO: make accessing a specific parameter in a row", "context.settings.active_mapping_file = a_folder_with_mapping / \"anon_mapping.csv\" return mock_main_runner @fixture def mock_map_context_with_mapping(a_folder_with_mapping) -> MapCommandContext: return", "0 mapping = runner_with_mapping.get_current_mapping() # TODO: make accessing a specific parameter in a", "context = mock_main_runner.get_context() context.current_dir = lambda: NotImplementedError( \"Call settings.active_mapping_file instead\" ) context.settings.active_mapping_file =", "anonapi.cli.map_commands import ( MapCommandContext, activate, add_accession_numbers, add_selection, delete, edit, find_dicom_files, add_study_folders, init, status,", "MappingFile, MappingLoadError, ) from anonapi.parameters import ParameterSet, PseudoName, SourceIdentifierParameter from anonapi.settings import DefaultAnonClientSettings", "message\"\"\" # make sure a valid mapping file is found context = mock_main_runner.get_context()", "( RESOURCE_PATH / \"test_cli\" / \"anon_mapping.csv\" ) # but then raise exception when", "args=[str(selection_folder.path)], catch_exceptions=False, ) # oh no! no mapping yet! 
assert \"No active mapping\"", "files should not have been selected yet currently assert not selection_folder.has_file_selection() # but", "def test_cli_map_add_accession_numbers(runner_with_mapping): \"\"\"Add some accession numbers to a mapping\"\"\" result = runner_with_mapping.invoke(add_accession_numbers, [\"12344556.12342345\"])", "map info should give you a nice print of contents\"\"\" context = mock_main_runner_with_mapping.get_context()", "yield a nice 'no mapping' message \"\"\" result = runner_without_mapping.invoke(status, catch_exceptions=False) assert result.exit_code", "CliRunner from pytest import fixture from anonapi.cli import entrypoint from anonapi.cli.map_commands import (", "find_dicom_files, add_study_folders, init, status, ) from anonapi.mapper import ( DEFAULT_MAPPING_NAME, Mapping, MappingFile, MappingLoadError,", "but do not scan\"\"\" context = mock_map_context_without runner = AnonAPIContextRunner(mock_context=context) selection_folder = folder_with_some_dicom_files", "added = ParameterSet(mapping.grid.rows[-1]) identifier = added.get_param_by_type(SourceIdentifierParameter) # and the identifier should be a", "have been added added = context.get_current_mapping().grid.rows[20:] assert len(added) == 3 # and the", "== 0 mapping = runner_with_mapping.get_current_mapping() # TODO: make accessing a specific parameter in", "<gh_stars>0 from pathlib import Path from unittest.mock import Mock from click.testing import CliRunner", "0 assert selection_folder.has_file_selection() assert \"that look like DICOM\" in result.output @fixture def create_fileselection_click_recorder(monkeypatch):", "\"map init\", catch_exceptions=False) with open(Path(tmpdir) / \"anon_mapping.csv\", \"r\") as f: f.read() assert result.exit_code", "= mock_map_context_without runner = AnonAPIContextRunner(mock_context=context) selection_folder = folder_with_some_dicom_files runner.invoke(init) # by default there", "\"\"\"Add all dicom files in this folder to mapping but do 
not scan\"\"\"", "assert not selection_folder.has_file_selection() # but after adding result = runner.invoke( add_study_folders, args=[\"--no-check-dicom\", str(selection_folder.path)],", "added.get_param_by_type(SourceIdentifierParameter) # and the identifier should be a FileSelectionIdentifier which is # relative", "not x.is_file() ] # First run with regular command line input result =", "add_study_folders, args=[\"--no-check-dicom\", str(selection_folder.path)], catch_exceptions=False, ) # There should be a selection there assert", "= [ x for x in folder_with_some_dicom_files.path.glob(\"*\") if not x.is_file() ] # First", "# TODO: make accessing a specific parameter in a row easier. Not like", "3 # and the pseudo names from the input file should have been", "should have been added to the mapping: mapping = context.get_current_mapping() # reload from", "mapping\"\"\" result = runner_with_mapping.invoke(add_accession_numbers, [\"12344556.12342345\"]) assert result.exit_code == 0 mapping = runner_with_mapping.get_current_mapping() #", "\"\"\"Add multiple study folders using the add-study-folders command\"\"\" context: MapCommandContext = runner_with_mapping.mock_context context.current_dir", "There should be a selection there assert result.exit_code == 0 assert selection_folder.has_file_selection() assert", "runner.invoke(activate).output def test_cli_map_add_paths_file( mock_map_context_with_mapping, folder_with_some_dicom_files, monkeypatch ): \"\"\"Add an xls file containing several", "mock_main_runner.get_context() context.settings.active_mapping_file = ( RESOURCE_PATH / \"test_cli\" / \"anon_mapping.csv\" ) # but then", "MappingContextRunner(AnonAPIContextRunner): \"\"\"A click runner that always injects a MapCommandContext instance\"\"\" def __init__(self, mock_context:", "assert result.exit_code == 1 assert \"No active mapping\" in result.output def test_cli_map_info_no_active_mapping(runner_without_mapping): \"\"\"Running", "MapCommandContext = 
runner_with_mapping.mock_context context.current_dir = folder_with_mapping_and_some_dicom_files.path monkeypatch.setattr( \"os.getcwd\", lambda: str(folder_with_mapping_and_some_dicom_files.path) ) result =", "at this point assert not selection_folder.has_file_selection() # but after adding result = runner.invoke(", "runner = AnonAPIContextRunner(mock_context=context) result = runner.invoke(edit, catch_exceptions=False) assert result.exit_code == 0 assert mock_launch.called", "set runner.invoke(activate) assert settings.active_mapping_file == context.current_dir / \"anon_mapping.csv\" # Graceful error when activating", "runner_with_mapping result = runner.invoke( add_selection, str(fileselection_path), catch_exceptions=False ) assert result.exit_code == 0 mapping", "[\"--input-file\", str(input_file_path)] ) assert result.exit_code == 0 mapping = runner_with_mapping.get_current_mapping() assert ( ParameterSet(mapping.rows[-1]).as_dict()[\"source\"].value.identifier", "to mapping but do not scan\"\"\" context = mock_map_context_without runner = AnonAPIContextRunner(mock_context=context) selection_folder", "= mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) assert context.settings.active_mapping_file.exists() result = runner.invoke(delete, catch_exceptions=False) assert result.exit_code", "return MockContextCliRunner( mock_context=MapCommandContext( current_dir=tmpdir, settings=DefaultAnonClientSettings() ) ) def test_cli_map_add_selection( runner_with_mapping, a_folder_with_mapping_and_fileselection ): \"\"\"Add", "selection there assert result.exit_code == 0 assert selection_folder.has_file_selection() # also, this selection should", "import DefaultAnonClientSettings from tests.conftest import AnonAPIContextRunner, MockContextCliRunner from tests import RESOURCE_PATH MAPPER_RESOURCE_PATH =", "that passes MapCommandContext without active mapping (active mapping is None) \"\"\" return MockContextCliRunner(", ") # but then raise 
exception when loading def mock_load(x): raise MappingLoadError(\"Test Exception\")", "DefaultAnonClientSettings from tests.conftest import AnonAPIContextRunner, MockContextCliRunner from tests import RESOURCE_PATH MAPPER_RESOURCE_PATH = RESOURCE_PATH", "files in this folder to mapping\"\"\" context = mock_map_context_without runner = AnonAPIContextRunner(mock_context=context) selection_folder", "this folder to mapping but do not scan\"\"\" context = mock_map_context_without runner =", "\"No active mapping\" in result.output def test_cli_map_info_load_exception(mock_main_runner, monkeypatch): \"\"\"Running info with a corrupt", "context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) settings = context.settings settings.active_mapping_file = None #", "20 # now try to add something from the directory with some dicom", "mapping file at\" in runner.invoke(activate).output def test_cli_map_add_paths_file( mock_map_context_with_mapping, folder_with_some_dicom_files, monkeypatch ): \"\"\"Add an", "with assert ( \"Could not find mapping\" in runner.invoke(entrypoint.cli, \"map activate\", catch_exceptions=False).output )", "mock_launch.reset_mock() runner.invoke(delete) result = runner.invoke(edit) assert \"No mapping file found at\" in result.output", "be a valid mapping runner.invoke(entrypoint.cli, \"map init\", catch_exceptions=False) mapping_path = mock_main_runner.get_context().current_dir / DEFAULT_MAPPING_NAME", "result.output def test_cli_map_delete(mock_map_context_with_mapping): \"\"\"Running map info should give you a nice print of", "len(mapping.grid) == 20 # now try to add something from the directory with", "activated assert mock_main_runner.get_context().settings.active_mapping_file == mapping_path def test_cli_map_info(mock_main_runner_with_mapping): \"\"\"Running map info should give you", "a_folder_with_mapping_and_fileselection ): \"\"\"Add a file selection to a mapping.\"\"\" mapping_folder, fileselection_path = 
a_folder_with_mapping_and_fileselection", "\"\"\"A click runner that always injects a MapCommandContext instance\"\"\" def __init__(self, mock_context: MapCommandContext):", "in result.output # make one runner.invoke(init) # by default there are no rows", "] # First run with regular command line input result = runner.invoke( add_study_folders,", "catch_exceptions=False) assert result.exit_code == 0 assert not context.settings.active_mapping_file.exists() # deleting again will yield", "be a FileSelectionIdentifier which is # relative to the current path assert not", "0 assert \"folder:folder/file4 patientName4\" in result.output def test_cli_map_info_empty_dir(mock_main_runner): \"\"\"Running info on a directory", "passes MapCommandContext without active mapping (active mapping is None) \"\"\" return MockContextCliRunner( mock_context=MapCommandContext(", "and the created mapping should have been activated assert mock_main_runner.get_context().settings.active_mapping_file == mapping_path def", "\"\"\"Add all dicom files in this folder to mapping\"\"\" context = mock_map_context_without runner", "not crash # and the created mapping should have been activated assert mock_main_runner.get_context().settings.active_mapping_file", "[\"12344556.12342345\"]) assert result.exit_code == 0 mapping = runner_with_mapping.get_current_mapping() # TODO: make accessing a", "but track calls\"\"\" recorder(*args, **kwargs) return find_dicom_files(*args, **kwargs) monkeypatch.setattr( \"anonapi.cli.map_commands.find_dicom_files\", find_dicom_files_recorded, ) return", "to a mapping.\"\"\" mapping_folder, fileselection_path = a_folder_with_mapping_and_fileselection runner = runner_with_mapping result = runner.invoke(", "have been selected yet currently assert not selection_folder.has_file_selection() # but after adding result", "numbers to a mapping\"\"\" input_file_path = MAPPER_RESOURCE_PATH / \"inputfile\" / \"some_accession_numbers.xlsx\" result =", "= mock_main_runner # there 
should be no mapping to start with assert (", "( ParameterSet(mapping.rows[-1]).as_dict()[\"source\"].value.identifier == \"12344556.12342345\" ) def test_cli_map_add_accession_numbers_file(runner_with_mapping): \"\"\"Add some accession numbers to a", "= runner.invoke( add_study_folders, args=[str(folders[0])], catch_exceptions=False ) assert result.exit_code == 0 # Then run", "in the folder at this point assert not selection_folder.has_file_selection() # but after adding", "folder_with_some_dicom_files ): \"\"\"Add all dicom files in this folder to mapping but do", "= lambda: NotImplementedError( \"Call settings.active_mapping_file instead\" ) context.settings.active_mapping_file = a_folder_with_mapping / \"anon_mapping.csv\" return", "deleting again will yield nice message result = runner.invoke(delete) assert result.exit_code == 1", "one runner.invoke(init) # by default there are no rows mapping assert len(context.get_current_mapping().grid) ==", "catch_exceptions=False) assert result.exit_code == 1 assert \"No active mapping\" in result.output def test_cli_map_info_load_exception(mock_main_runner,", "now try to add something from the directory with some dicom files context.current_dir", ") from anonapi.parameters import ParameterSet, PseudoName, SourceIdentifierParameter from anonapi.settings import DefaultAnonClientSettings from tests.conftest", "tests import RESOURCE_PATH MAPPER_RESOURCE_PATH = RESOURCE_PATH / \"test_mapper\" class MappingContextRunner(AnonAPIContextRunner): \"\"\"A click runner", ") def test_cli_map(mock_main_runner, mock_cli_base_context, tmpdir): result = mock_main_runner.invoke(entrypoint.cli, \"map init\", catch_exceptions=False) with open(Path(tmpdir)", "anonapi.mapper import ( DEFAULT_MAPPING_NAME, Mapping, MappingFile, MappingLoadError, ) from anonapi.parameters import ParameterSet, PseudoName,", "+ names) input_file_path = MAPPER_RESOURCE_PATH / \"inputfile\" / \"some_folder_names.xlsx\" result = runner.invoke( add_study_folders,", 
"= context.get_current_mapping() assert len(mapping.grid) == 20 # now try to add something from", "identifier = added.get_param_by_type(SourceIdentifierParameter) # and the identifier should be a FileSelectionIdentifier which is", "dir runner.invoke(delete) assert \"Could not find mapping file at\" in runner.invoke(activate).output def test_cli_map_add_paths_file(", "catch_exceptions=False) assert result.exit_code == 1 assert \"No active mapping\" in result.output def test_cli_map_info_no_active_mapping(runner_without_mapping):", "import ( DEFAULT_MAPPING_NAME, Mapping, MappingFile, MappingLoadError, ) from anonapi.parameters import ParameterSet, PseudoName, SourceIdentifierParameter", "no! no mapping yet! assert \"No active mapping\" in result.output # make one", "a decorator around the function that adds paths to mapping. Function will still", "runner_with_mapping, folder_with_mapping_and_some_dicom_files, create_fileselection_click_recorder, monkeypatch, ): \"\"\"Add multiple study folders using the add-study-folders command\"\"\"", "is # relative to the current path assert not identifier.path.is_absolute() def test_cli_map_add_folder_no_check( mock_map_context_without,", "result = runner_without_mapping.invoke(status, catch_exceptions=False) assert result.exit_code == 1 assert \"No active mapping\" in", "dicom files in this folder to mapping\"\"\" context = mock_map_context_without runner = AnonAPIContextRunner(mock_context=context)", "): \"\"\"Add multiple study folders using the add-study-folders command\"\"\" context: MapCommandContext = runner_with_mapping.mock_context", "files in this folder to mapping but do not scan\"\"\" context = mock_map_context_without", "# now try edit without any mapping being present mock_launch.reset_mock() runner.invoke(delete) result =", "try to add something from the directory with some dicom files context.current_dir =", "= runner_with_mapping.invoke(add_accession_numbers, [\"12344556.12342345\"]) assert result.exit_code == 0 
mapping = runner_with_mapping.get_current_mapping() # TODO: make", "pseudo_names = [ParameterSet(x).get_param_by_type(PseudoName) for x in added] assert pseudo_names[1].value == \"studyA\" assert pseudo_names[2].value", "\"fileselection:a_folder/a_file_selection.txt\" in \"\".join( [str(x) for y in mapping.rows for x in y] )", "= Mock() def find_dicom_files_recorded(*args, **kwargs): \"\"\"Run the original function, but track calls\"\"\" recorder(*args,", "unittest.mock import Mock from click.testing import CliRunner from pytest import fixture from anonapi.cli", "after adding result = runner.invoke( add_study_folders, args=[\"--no-check-dicom\", str(selection_folder.path)], catch_exceptions=False, ) # There should", "mock_main_runner.get_context() context.current_dir = lambda: NotImplementedError( \"Call settings.active_mapping_file instead\" ) context.settings.active_mapping_file = a_folder_with_mapping /", "CLIRunner with MapCommandContext that has a valid active mapping\"\"\" return MappingContextRunner(mock_context=mock_map_context_with_mapping) @fixture def", "0 mapping = runner_with_mapping.mock_context.get_current_mapping() assert len(mapping) == 21 assert \"fileselection:a_folder/a_file_selection.txt\" in \"\".join( [str(x)", "been activated assert mock_main_runner.get_context().settings.active_mapping_file == mapping_path def test_cli_map_info(mock_main_runner_with_mapping): \"\"\"Running map info should give", "result.exit_code == 0 assert \"folder:folder/file4 patientName4\" in result.output def test_cli_map_info_empty_dir(mock_main_runner): \"\"\"Running info on", "= AnonAPIContextRunner(mock_context=context) result = runner.invoke(edit, catch_exceptions=False) assert result.exit_code == 0 assert mock_launch.called #", "= AnonAPIContextRunner(mock_context=context) # assert mapping is as expected mapping = context.get_current_mapping() assert len(mapping.grid)", "three rows should have been added added = context.get_current_mapping().grid.rows[20:] 
assert len(added) == 3", "as normal, but calls are recorded \"\"\" recorder = Mock() def find_dicom_files_recorded(*args, **kwargs):", "result.exit_code == 0 mapping = runner_with_mapping.get_current_mapping() # TODO: make accessing a specific parameter", "info with a corrupt mapping file should yield a nice message\"\"\" # make", "something from the directory with some dicom files context.current_dir = folder_with_some_dicom_files.path monkeypatch.setattr(\"os.getcwd\", lambda:", "\"No such file or directory\" in result.output def test_cli_map_edit(mock_map_context_with_mapping, mock_launch): context = mock_map_context_with_mapping", "but calls are recorded \"\"\" recorder = Mock() def find_dicom_files_recorded(*args, **kwargs): \"\"\"Run the", "== \"studyA\" assert pseudo_names[2].value == \"studyB\" def test_cli_map_add_accession_numbers(runner_with_mapping): \"\"\"Add some accession numbers to", "= folder_with_some_dicom_files # Add this folder to mapping result = runner.invoke( add_study_folders, args=[str(selection_folder.path)],", "selection_folder.has_file_selection() # but after adding result = runner.invoke( add_study_folders, args=[\"--no-check-dicom\", str(selection_folder.path)], catch_exceptions=False, )", "catch_exceptions=False ) assert result.exit_code == 0 # now three rows should have been", "0 # dicom files should not have been selected yet currently assert not", "def mock_map_context_without(tmpdir) -> MapCommandContext: return MapCommandContext(current_dir=tmpdir, settings=DefaultAnonClientSettings(),) @fixture def runner_without_mapping(tmpdir): \"\"\"A click CLIRunner", "recorder = Mock() def find_dicom_files_recorded(*args, **kwargs): \"\"\"Run the original function, but track calls\"\"\"", "for x in y] ) def test_cli_map(mock_main_runner, mock_cli_base_context, tmpdir): result = mock_main_runner.invoke(entrypoint.cli, \"map", "1 assert \"No active mapping\" in result.output def test_cli_map_info_load_exception(mock_main_runner, 
monkeypatch): \"\"\"Running info with", "x in y] ) def test_cli_map(mock_main_runner, mock_cli_base_context, tmpdir): result = mock_main_runner.invoke(entrypoint.cli, \"map init\",", "= AnonAPIContextRunner(mock_context=context) selection_folder = folder_with_some_dicom_files # Add this folder to mapping result =", "result.output def test_cli_map_info_empty_dir(mock_main_runner): \"\"\"Running info on a directory not containing a mapping file", "result = runner.invoke(edit, catch_exceptions=False) assert result.exit_code == 0 assert mock_launch.called # now try", "= MAPPER_RESOURCE_PATH / \"inputfile\" / \"some_folder_names.xlsx\" result = runner.invoke( add_study_folders, args=[\"-f\", str(input_file_path)], catch_exceptions=False", "add_selection, str(fileselection_path), catch_exceptions=False ) assert result.exit_code == 0 mapping = runner_with_mapping.mock_context.get_current_mapping() assert len(mapping)", "mapping_path = mock_main_runner.get_context().current_dir / DEFAULT_MAPPING_NAME assert mapping_path.exists() MappingFile(mapping_path).load_mapping() # should not crash #", "create_fileselection_click_recorder(monkeypatch): \"\"\"Add a decorator around the function that adds paths to mapping. 
Function", "result = runner_with_mapping.invoke(add_accession_numbers, [\"12344556.12342345\"]) assert result.exit_code == 0 mapping = runner_with_mapping.get_current_mapping() # TODO:", "again will yield nice message result = runner.invoke(delete) assert result.exit_code == 1 assert", "= mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) settings = context.settings settings.active_mapping_file = None # we", "context = mock_map_context_without runner = AnonAPIContextRunner(mock_context=context) selection_folder = folder_with_some_dicom_files runner.invoke(init) # by default", "# but after adding result = runner.invoke( add_study_folders, args=[str(selection_folder.path)], catch_exceptions=False ) # There", "MockContextCliRunner from tests import RESOURCE_PATH MAPPER_RESOURCE_PATH = RESOURCE_PATH / \"test_mapper\" class MappingContextRunner(AnonAPIContextRunner): \"\"\"A", "you a nice print of contents\"\"\" context = mock_main_runner_with_mapping.get_context() context.current_dir = RESOURCE_PATH /", "**kwargs) monkeypatch.setattr( \"anonapi.cli.map_commands.find_dicom_files\", find_dicom_files_recorded, ) return recorder def test_cli_map_add_study_folders( runner_with_mapping, folder_with_mapping_and_some_dicom_files, create_fileselection_click_recorder, monkeypatch,", "from anonapi.settings import DefaultAnonClientSettings from tests.conftest import AnonAPIContextRunner, MockContextCliRunner from tests import RESOURCE_PATH", "**kwargs): \"\"\"Run the original function, but track calls\"\"\" recorder(*args, **kwargs) return find_dicom_files(*args, **kwargs)", "from the directory with some dicom files context.current_dir = folder_with_some_dicom_files.path monkeypatch.setattr(\"os.getcwd\", lambda: folder_with_some_dicom_files.path)", "numbers to a mapping\"\"\" result = runner_with_mapping.invoke(add_accession_numbers, [\"12344556.12342345\"]) assert result.exit_code == 0 mapping", "str(input_file_path)] ) assert result.exit_code == 0 
mapping = runner_with_mapping.get_current_mapping() assert ( ParameterSet(mapping.rows[-1]).as_dict()[\"source\"].value.identifier ==", "context = mock_map_context_without runner = AnonAPIContextRunner(mock_context=context) selection_folder = folder_with_some_dicom_files # Add this folder", "Mock() def find_dicom_files_recorded(*args, **kwargs): \"\"\"Run the original function, but track calls\"\"\" recorder(*args, **kwargs)", "RESOURCE_PATH MAPPER_RESOURCE_PATH = RESOURCE_PATH / \"test_mapper\" class MappingContextRunner(AnonAPIContextRunner): \"\"\"A click runner that always", "mapping\" in result.output # make one runner.invoke(init) # by default there are no", "to the mapping: mapping = context.get_current_mapping() # reload from disk assert len(mapping.grid) ==", "= runner.invoke(edit, catch_exceptions=False) assert result.exit_code == 0 assert mock_launch.called # now try edit", "test_cli_map_activate(mock_map_context_with_mapping): context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) settings = context.settings settings.active_mapping_file = None", "catch_exceptions=False) with open(Path(tmpdir) / \"anon_mapping.csv\", \"r\") as f: f.read() assert result.exit_code == 0", "folder_with_some_dicom_files.path monkeypatch.setattr(\"os.getcwd\", lambda: folder_with_some_dicom_files.path) folders = [ x for x in folder_with_some_dicom_files.path.glob(\"*\") if", "with regular command line input result = runner.invoke( add_study_folders, args=[str(folders[0])], catch_exceptions=False ) assert", "= folder_with_mapping_and_some_dicom_files.path monkeypatch.setattr( \"os.getcwd\", lambda: str(folder_with_mapping_and_some_dicom_files.path) ) result = runner_with_mapping.invoke( add_study_folders, args=[\"--no-check-dicom\", \"*\"],", "with input file input (input file contains 2 folders + names) input_file_path =", "settings=DefaultAnonClientSettings( active_mapping_file=a_folder_with_mapping / \"anon_mapping.csv\" ), ) @fixture def 
runner_with_mapping(mock_map_context_with_mapping) -> MappingContextRunner: \"\"\"A click", "\"folder:folder/file4 patientName4\" in result.output def test_cli_map_info_empty_dir(mock_main_runner): \"\"\"Running info on a directory not containing", "def find_dicom_files_recorded(*args, **kwargs): \"\"\"Run the original function, but track calls\"\"\" recorder(*args, **kwargs) return", "mock_main_runner @fixture def mock_map_context_with_mapping(a_folder_with_mapping) -> MapCommandContext: return MapCommandContext( current_dir=a_folder_with_mapping, settings=DefaultAnonClientSettings( active_mapping_file=a_folder_with_mapping / \"anon_mapping.csv\"", "regular command line input result = runner.invoke( add_study_folders, args=[str(folders[0])], catch_exceptions=False ) assert result.exit_code", "\"12344556.12342345\" ) def test_cli_map_add_accession_numbers_file(runner_with_mapping): \"\"\"Add some accession numbers to a mapping\"\"\" input_file_path =", "mock_map_context_without runner = AnonAPIContextRunner(mock_context=context) selection_folder = folder_with_some_dicom_files runner.invoke(init) # by default there are", "contents\"\"\" context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) assert context.settings.active_mapping_file.exists() result = runner.invoke(delete, catch_exceptions=False)", "not selection_folder.has_file_selection() # but after adding result = runner.invoke( add_study_folders, args=[\"--no-check-dicom\", str(selection_folder.path)], catch_exceptions=False,", "\"os.getcwd\", lambda: str(folder_with_mapping_and_some_dicom_files.path) ) result = runner_with_mapping.invoke( add_study_folders, args=[\"--no-check-dicom\", \"*\"], catch_exceptions=False, ) assert", "nice print of contents\"\"\" context = mock_main_runner_with_mapping.get_context() context.current_dir = RESOURCE_PATH / \"test_cli\" runner", "all dicom files in this folder to mapping\"\"\" context = mock_map_context_without runner =", "def 
test_cli_map(mock_main_runner, mock_cli_base_context, tmpdir): result = mock_main_runner.invoke(entrypoint.cli, \"map init\", catch_exceptions=False) with open(Path(tmpdir) /", "assert result.exit_code == 0 assert selection_folder.has_file_selection() assert \"that look like DICOM\" in result.output", "be a selection there assert result.exit_code == 0 assert selection_folder.has_file_selection() # also, this", "make one runner.invoke(init) # by default there are no rows mapping assert len(context.get_current_mapping().grid)", "file found at\" in result.output assert not mock_launch.called def test_cli_map_activate(mock_map_context_with_mapping): context = mock_map_context_with_mapping", "MapCommandContext): super().__init__(mock_context=mock_context) def get_current_mapping(self) -> Mapping: return self.mock_context.get_current_mapping() @fixture def mock_main_runner_with_mapping(mock_main_runner, a_folder_with_mapping): context", "input_file_path = MAPPER_RESOURCE_PATH / \"inputfile\" / \"some_folder_names.xlsx\" result = runner.invoke( add_study_folders, args=[\"-f\", str(input_file_path)],", "assert result.exit_code == 1 assert \"No such file or directory\" in result.output def", "parameter in a row easier. Not like below. 
assert ( ParameterSet(mapping.rows[-1]).as_dict()[\"source\"].value.identifier == \"12344556.12342345\"", "lambda: NotImplementedError( \"Call settings.active_mapping_file instead\" ) context.settings.active_mapping_file = a_folder_with_mapping / \"anon_mapping.csv\" return mock_main_runner", "yet currently assert not selection_folder.has_file_selection() # but after adding result = runner.invoke( add_study_folders,", "in current dir runner.invoke(delete) assert \"Could not find mapping file at\" in runner.invoke(activate).output", "x in added] assert pseudo_names[1].value == \"studyA\" assert pseudo_names[2].value == \"studyB\" def test_cli_map_add_accession_numbers(runner_with_mapping):", "PseudoName, SourceIdentifierParameter from anonapi.settings import DefaultAnonClientSettings from tests.conftest import AnonAPIContextRunner, MockContextCliRunner from tests", "def get_current_mapping(self) -> Mapping: return self.mock_context.get_current_mapping() @fixture def mock_main_runner_with_mapping(mock_main_runner, a_folder_with_mapping): context = mock_main_runner.get_context()", "2 assert \"that look like DICOM\" in result.output def test_cli_map_delete(mock_map_context_with_mapping): \"\"\"Running map info", "def test_cli_map_info_load_exception(mock_main_runner, monkeypatch): \"\"\"Running info with a corrupt mapping file should yield a", "= context.get_current_mapping().grid.rows[20:] assert len(added) == 3 # and the pseudo names from the", "like DICOM\" in result.output @fixture def create_fileselection_click_recorder(monkeypatch): \"\"\"Add a decorator around the function", "to mapping. 
Function will still works as normal, but calls are recorded \"\"\"", "which is # relative to the current path assert not identifier.path.is_absolute() def test_cli_map_add_folder_no_check(", "assert result.exit_code == 0 # Then run with input file input (input file", "# and the pseudo names from the input file should have been included", ") # There should be a selection there assert result.exit_code == 0 assert", "= folder_with_some_dicom_files runner.invoke(init) # by default there are no rows assert len(context.get_current_mapping().grid) ==", "message \"\"\" result = runner_without_mapping.invoke(status, catch_exceptions=False) assert result.exit_code == 1 assert \"No active", "recorder def test_cli_map_add_study_folders( runner_with_mapping, folder_with_mapping_and_some_dicom_files, create_fileselection_click_recorder, monkeypatch, ): \"\"\"Add multiple study folders using", "active mapping should be set runner.invoke(activate) assert settings.active_mapping_file == context.current_dir / \"anon_mapping.csv\" #", "0 assert mock_launch.called # now try edit without any mapping being present mock_launch.reset_mock()", "AnonAPIContextRunner(mock_context=context) # assert mapping is as expected mapping = context.get_current_mapping() assert len(mapping.grid) ==", "mapping yet! assert \"No active mapping\" in result.output # make one runner.invoke(init) #", "MapCommandContext: return MapCommandContext( current_dir=a_folder_with_mapping, settings=DefaultAnonClientSettings( active_mapping_file=a_folder_with_mapping / \"anon_mapping.csv\" ), ) @fixture def runner_with_mapping(mock_map_context_with_mapping)", "mock_launch.called def test_cli_map_activate(mock_map_context_with_mapping): context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) settings = context.settings settings.active_mapping_file", "below. 
assert ( ParameterSet(mapping.rows[-1]).as_dict()[\"source\"].value.identifier == \"12344556.12342345\" ) def test_cli_map_add_accession_numbers_file(runner_with_mapping): \"\"\"Add some accession numbers", "to start with assert ( \"Could not find mapping\" in runner.invoke(entrypoint.cli, \"map activate\",", "mock_map_context_without runner = AnonAPIContextRunner(mock_context=context) selection_folder = folder_with_some_dicom_files # Add this folder to mapping", "return recorder def test_cli_map_add_study_folders( runner_with_mapping, folder_with_mapping_and_some_dicom_files, create_fileselection_click_recorder, monkeypatch, ): \"\"\"Add multiple study folders", "paths and potentially pseudonyms to an existing mapping \"\"\" context = mock_map_context_with_mapping runner", "to add something from the directory with some dicom files context.current_dir = folder_with_some_dicom_files.path", "included pseudo_names = [ParameterSet(x).get_param_by_type(PseudoName) for x in added] assert pseudo_names[1].value == \"studyA\" assert", "mock_main_runner.get_context().settings.active_mapping_file == mapping_path def test_cli_map_info(mock_main_runner_with_mapping): \"\"\"Running map info should give you a nice", "runner.invoke(entrypoint.cli, \"map status\", catch_exceptions=False) assert result.exit_code == 0 assert \"folder:folder/file4 patientName4\" in result.output", "a FileSelectionIdentifier which is # relative to the current path assert not identifier.path.is_absolute()", "paths to mapping. 
Function will still works as normal, but calls are recorded", "# deleting again will yield nice message result = runner.invoke(delete) assert result.exit_code ==", "== \"12344556.12342345\" ) def test_cli_map_add_accession_numbers_file(runner_with_mapping): \"\"\"Add some accession numbers to a mapping\"\"\" input_file_path", "result = runner_with_mapping.invoke( add_accession_numbers, [\"--input-file\", str(input_file_path)] ) assert result.exit_code == 0 mapping =", "# No selection file has been put in the folder at this point", "folder_with_some_dicom_files.path.glob(\"*\") if not x.is_file() ] # First run with regular command line input", "== 0 # dicom files should not have been selected yet currently assert", "runner.invoke(entrypoint.cli, \"map activate\", catch_exceptions=False).output ) # but after init there should be a", "default there are no rows assert len(context.get_current_mapping().grid) == 0 # dicom files should", "None # we start with a mapping file, but no active # after", "been added added = context.get_current_mapping().grid.rows[20:] assert len(added) == 3 # and the pseudo", "file selection to a mapping.\"\"\" mapping_folder, fileselection_path = a_folder_with_mapping_and_fileselection runner = runner_with_mapping result", "there assert result.exit_code == 0 assert selection_folder.has_file_selection() # also, this selection should have", "assert len(mapping.grid) == 1 added = ParameterSet(mapping.grid.rows[-1]) identifier = added.get_param_by_type(SourceIdentifierParameter) # and the", "= a_folder_with_mapping / \"anon_mapping.csv\" return mock_main_runner @fixture def mock_map_context_with_mapping(a_folder_with_mapping) -> MapCommandContext: return MapCommandContext(", "mapping_path.exists() MappingFile(mapping_path).load_mapping() # should not crash # and the created mapping should have", "now three rows should have been added added = context.get_current_mapping().grid.rows[20:] assert len(added) ==", "= mock_main_runner.get_context() 
context.settings.active_mapping_file = ( RESOURCE_PATH / \"test_cli\" / \"anon_mapping.csv\" ) # but", "def mock_load(x): raise MappingLoadError(\"Test Exception\") monkeypatch.setattr(\"anonapi.mapper.JobParameterGrid.load\", mock_load) runner = CliRunner() result = runner.invoke(entrypoint.cli,", "of contents\"\"\" context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) assert context.settings.active_mapping_file.exists() result = runner.invoke(delete,", "loading def mock_load(x): raise MappingLoadError(\"Test Exception\") monkeypatch.setattr(\"anonapi.mapper.JobParameterGrid.load\", mock_load) runner = CliRunner() result =", "def test_cli_map_info_no_active_mapping(runner_without_mapping): \"\"\"Running info on a directory not containing a mapping file should", "result.exit_code == 1 assert \"No active mapping\" in result.output def test_cli_map_info_no_active_mapping(runner_without_mapping): \"\"\"Running info", "\"\"\" result = runner_without_mapping.invoke(status, catch_exceptions=False) assert result.exit_code == 1 assert \"No active mapping\"", "str(fileselection_path), catch_exceptions=False ) assert result.exit_code == 0 mapping = runner_with_mapping.mock_context.get_current_mapping() assert len(mapping) ==", "runner = mock_main_runner # there should be no mapping to start with assert", "assert result.exit_code == 0 mapping = runner_with_mapping.get_current_mapping() # TODO: make accessing a specific", "settings=DefaultAnonClientSettings(),) @fixture def runner_without_mapping(tmpdir): \"\"\"A click CLIRunner that passes MapCommandContext without active mapping", "in result.output @fixture def create_fileselection_click_recorder(monkeypatch): \"\"\"Add a decorator around the function that adds", "a row easier. Not like below. 
assert ( ParameterSet(mapping.rows[-1]).as_dict()[\"source\"].value.identifier == \"12344556.12342345\" ) def", "from pytest import fixture from anonapi.cli import entrypoint from anonapi.cli.map_commands import ( MapCommandContext,", "there is no mapping in current dir runner.invoke(delete) assert \"Could not find mapping", "always injects a MapCommandContext instance\"\"\" def __init__(self, mock_context: MapCommandContext): super().__init__(mock_context=mock_context) def get_current_mapping(self) ->", "\"\".join( [str(x) for y in mapping.rows for x in y] ) def test_cli_map(mock_main_runner,", "give you a nice print of contents\"\"\" context = mock_main_runner_with_mapping.get_context() context.current_dir = RESOURCE_PATH", "== 1 assert \"Test Exception\" in result.output def test_cli_map_add_folder(mock_map_context_without, folder_with_some_dicom_files): \"\"\"Add all dicom", "rows should have been added added = context.get_current_mapping().grid.rows[20:] assert len(added) == 3 #", "command line input result = runner.invoke( add_study_folders, args=[str(folders[0])], catch_exceptions=False ) assert result.exit_code ==", "there are no rows mapping assert len(context.get_current_mapping().grid) == 0 # No selection file", "using the add-study-folders command\"\"\" context: MapCommandContext = runner_with_mapping.mock_context context.current_dir = folder_with_mapping_and_some_dicom_files.path monkeypatch.setattr( \"os.getcwd\",", "adding result = runner.invoke( add_study_folders, args=[\"--no-check-dicom\", str(selection_folder.path)], catch_exceptions=False, ) # There should be", "MappingLoadError(\"Test Exception\") monkeypatch.setattr(\"anonapi.mapper.JobParameterGrid.load\", mock_load) runner = CliRunner() result = runner.invoke(entrypoint.cli, \"map status\", catch_exceptions=False)", "CliRunner() result = runner.invoke(entrypoint.cli, \"map status\", catch_exceptions=False) assert result.exit_code == 1 assert \"Test", 
"test_cli_map_add_accession_numbers(runner_with_mapping): \"\"\"Add some accession numbers to a mapping\"\"\" result = runner_with_mapping.invoke(add_accession_numbers, [\"12344556.12342345\"]) assert", "import fixture from anonapi.cli import entrypoint from anonapi.cli.map_commands import ( MapCommandContext, activate, add_accession_numbers,", "runner.invoke(entrypoint.cli, \"map status\", catch_exceptions=False) assert result.exit_code == 1 assert \"No active mapping\" in", "/ \"anon_mapping.csv\" ) # but then raise exception when loading def mock_load(x): raise", "mapping\"\"\" input_file_path = MAPPER_RESOURCE_PATH / \"inputfile\" / \"some_accession_numbers.xlsx\" result = runner_with_mapping.invoke( add_accession_numbers, [\"--input-file\",", "/ \"some_folder_names.xlsx\" result = runner.invoke( add_study_folders, args=[\"-f\", str(input_file_path)], catch_exceptions=False ) assert result.exit_code ==", "(active mapping is None) \"\"\" return MockContextCliRunner( mock_context=MapCommandContext( current_dir=tmpdir, settings=DefaultAnonClientSettings() ) ) def", "try edit without any mapping being present mock_launch.reset_mock() runner.invoke(delete) result = runner.invoke(edit) assert", "== 0 # No selection file has been put in the folder at", "folder_with_mapping_and_some_dicom_files, create_fileselection_click_recorder, monkeypatch, ): \"\"\"Add multiple study folders using the add-study-folders command\"\"\" context:", "activating when there is no mapping in current dir runner.invoke(delete) assert \"Could not", "added added = context.get_current_mapping().grid.rows[20:] assert len(added) == 3 # and the pseudo names", "no rows mapping assert len(context.get_current_mapping().grid) == 0 # No selection file has been", "@fixture def mock_map_context_with_mapping(a_folder_with_mapping) -> MapCommandContext: return MapCommandContext( current_dir=a_folder_with_mapping, settings=DefaultAnonClientSettings( active_mapping_file=a_folder_with_mapping / 
\"anon_mapping.csv\" ),", "status\", catch_exceptions=False) assert result.exit_code == 0 assert \"folder:folder/file4 patientName4\" in result.output def test_cli_map_info_empty_dir(mock_main_runner):", "not have been selected yet currently assert not selection_folder.has_file_selection() # but after adding", "assert \"fileselection:a_folder/a_file_selection.txt\" in \"\".join( [str(x) for y in mapping.rows for x in y]", "nice 'no mapping' message \"\"\" result = runner_without_mapping.invoke(status, catch_exceptions=False) assert result.exit_code == 1", "\"r\") as f: f.read() assert result.exit_code == 0 def test_cli_map_init(mock_main_runner, tmpdir): runner =", "in result.output def test_cli_map_add_folder(mock_map_context_without, folder_with_some_dicom_files): \"\"\"Add all dicom files in this folder to", "= None # we start with a mapping file, but no active #", "selected yet currently assert not selection_folder.has_file_selection() # but after adding result = runner.invoke(", "add_study_folders, args=[\"-f\", str(input_file_path)], catch_exceptions=False ) assert result.exit_code == 0 # now three rows", "rows assert len(context.get_current_mapping().grid) == 0 # dicom files should not have been selected", "= mock_main_runner result = runner.invoke(entrypoint.cli, \"map status\", catch_exceptions=False) assert result.exit_code == 1 assert", "some accession numbers to a mapping\"\"\" input_file_path = MAPPER_RESOURCE_PATH / \"inputfile\" / \"some_accession_numbers.xlsx\"", "accession numbers to a mapping\"\"\" result = runner_with_mapping.invoke(add_accession_numbers, [\"12344556.12342345\"]) assert result.exit_code == 0", "None) \"\"\" return MockContextCliRunner( mock_context=MapCommandContext( current_dir=tmpdir, settings=DefaultAnonClientSettings() ) ) def test_cli_map_add_selection( runner_with_mapping, a_folder_with_mapping_and_fileselection", "return mock_main_runner @fixture def mock_map_context_with_mapping(a_folder_with_mapping) -> 
MapCommandContext: return MapCommandContext( current_dir=a_folder_with_mapping, settings=DefaultAnonClientSettings( active_mapping_file=a_folder_with_mapping /", "runner = AnonAPIContextRunner(mock_context=context) selection_folder = folder_with_some_dicom_files runner.invoke(init) # by default there are no", "anonapi.cli import entrypoint from anonapi.cli.map_commands import ( MapCommandContext, activate, add_accession_numbers, add_selection, delete, edit,", "has a valid active mapping\"\"\" return MappingContextRunner(mock_context=mock_map_context_with_mapping) @fixture def mock_map_context_without(tmpdir) -> MapCommandContext: return", "assert \"No active mapping\" in result.output # make one runner.invoke(init) # by default", "should have been included pseudo_names = [ParameterSet(x).get_param_by_type(PseudoName) for x in added] assert pseudo_names[1].value", "= runner.invoke( add_study_folders, args=[str(selection_folder.path)], catch_exceptions=False, ) # oh no! no mapping yet! assert", "found context = mock_main_runner.get_context() context.settings.active_mapping_file = ( RESOURCE_PATH / \"test_cli\" / \"anon_mapping.csv\" )", "= mock_main_runner.get_context() context.current_dir = lambda: NotImplementedError( \"Call settings.active_mapping_file instead\" ) context.settings.active_mapping_file = a_folder_with_mapping", "test_cli_map_add_study_folders( runner_with_mapping, folder_with_mapping_and_some_dicom_files, create_fileselection_click_recorder, monkeypatch, ): \"\"\"Add multiple study folders using the add-study-folders", "should give you a nice print of contents\"\"\" context = mock_map_context_with_mapping runner =", "settings = context.settings settings.active_mapping_file = None # we start with a mapping file,", "runner.invoke( add_study_folders, args=[str(folders[0])], catch_exceptions=False ) assert result.exit_code == 0 # Then run with", "added] assert pseudo_names[1].value == \"studyA\" assert pseudo_names[2].value == \"studyB\" def 
test_cli_map_add_accession_numbers(runner_with_mapping): \"\"\"Add some", "open(Path(tmpdir) / \"anon_mapping.csv\", \"r\") as f: f.read() assert result.exit_code == 0 def test_cli_map_init(mock_main_runner,", "assert not identifier.path.is_absolute() def test_cli_map_add_folder_no_check( mock_map_context_without, folder_with_some_dicom_files ): \"\"\"Add all dicom files in", "file should yield a nice 'no mapping' message \"\"\" result = runner_without_mapping.invoke(status, catch_exceptions=False)", "# reload from disk assert len(mapping.grid) == 1 added = ParameterSet(mapping.grid.rows[-1]) identifier =", "activating, active mapping should be set runner.invoke(activate) assert settings.active_mapping_file == context.current_dir / \"anon_mapping.csv\"", "xls file containing several paths and potentially pseudonyms to an existing mapping \"\"\"", "mapping.rows for x in y] ) def test_cli_map(mock_main_runner, mock_cli_base_context, tmpdir): result = mock_main_runner.invoke(entrypoint.cli,", "create_fileselection_click_recorder.call_count == 2 assert \"that look like DICOM\" in result.output def test_cli_map_delete(mock_map_context_with_mapping): \"\"\"Running", "expected mapping = context.get_current_mapping() assert len(mapping.grid) == 20 # now try to add", "status\", catch_exceptions=False) assert result.exit_code == 1 assert \"No active mapping\" in result.output def", "catch_exceptions=False) mapping_path = mock_main_runner.get_context().current_dir / DEFAULT_MAPPING_NAME assert mapping_path.exists() MappingFile(mapping_path).load_mapping() # should not crash", "reload from disk assert len(mapping.grid) == 1 added = ParameterSet(mapping.grid.rows[-1]) identifier = added.get_param_by_type(SourceIdentifierParameter)", "= MAPPER_RESOURCE_PATH / \"inputfile\" / \"some_accession_numbers.xlsx\" result = runner_with_mapping.invoke( add_accession_numbers, [\"--input-file\", str(input_file_path)] )", "injects a MapCommandContext instance\"\"\" def __init__(self, 
mock_context: MapCommandContext): super().__init__(mock_context=mock_context) def get_current_mapping(self) -> Mapping:", "mapping' message \"\"\" runner = mock_main_runner result = runner.invoke(entrypoint.cli, \"map status\", catch_exceptions=False) assert", "added to the mapping: mapping = context.get_current_mapping() # reload from disk assert len(mapping.grid)", "# and the identifier should be a FileSelectionIdentifier which is # relative to", "still works as normal, but calls are recorded \"\"\" recorder = Mock() def", "== 1 added = ParameterSet(mapping.grid.rows[-1]) identifier = added.get_param_by_type(SourceIdentifierParameter) # and the identifier should", "test_cli_map_init(mock_main_runner, tmpdir): runner = mock_main_runner # there should be no mapping to start", "mapping. Function will still works as normal, but calls are recorded \"\"\" recorder", "a selection there assert result.exit_code == 0 assert selection_folder.has_file_selection() assert \"that look like", "add_selection, delete, edit, find_dicom_files, add_study_folders, init, status, ) from anonapi.mapper import ( DEFAULT_MAPPING_NAME,", "runner_with_mapping.mock_context.get_current_mapping() assert len(mapping) == 21 assert \"fileselection:a_folder/a_file_selection.txt\" in \"\".join( [str(x) for y in", "catch_exceptions=False).output ) # but after init there should be a valid mapping runner.invoke(entrypoint.cli,", "result.exit_code == 1 assert \"Test Exception\" in result.output def test_cli_map_add_folder(mock_map_context_without, folder_with_some_dicom_files): \"\"\"Add all", "import entrypoint from anonapi.cli.map_commands import ( MapCommandContext, activate, add_accession_numbers, add_selection, delete, edit, find_dicom_files,", "runner = AnonAPIContextRunner(mock_context=context) # assert mapping is as expected mapping = context.get_current_mapping() assert", "# Graceful error when activating when there is no mapping in current dir", "add-study-folders command\"\"\" context: 
MapCommandContext = runner_with_mapping.mock_context context.current_dir = folder_with_mapping_and_some_dicom_files.path monkeypatch.setattr( \"os.getcwd\", lambda: str(folder_with_mapping_and_some_dicom_files.path)", "catch_exceptions=False) assert result.exit_code == 0 assert \"folder:folder/file4 patientName4\" in result.output def test_cli_map_info_empty_dir(mock_main_runner): \"\"\"Running", ") # oh no! no mapping yet! assert \"No active mapping\" in result.output", "is no mapping in current dir runner.invoke(delete) assert \"Could not find mapping file", "find mapping file at\" in runner.invoke(activate).output def test_cli_map_add_paths_file( mock_map_context_with_mapping, folder_with_some_dicom_files, monkeypatch ): \"\"\"Add", "a valid mapping runner.invoke(entrypoint.cli, \"map init\", catch_exceptions=False) mapping_path = mock_main_runner.get_context().current_dir / DEFAULT_MAPPING_NAME assert", "mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) assert context.settings.active_mapping_file.exists() result = runner.invoke(delete, catch_exceptions=False) assert result.exit_code ==", "from anonapi.mapper import ( DEFAULT_MAPPING_NAME, Mapping, MappingFile, MappingLoadError, ) from anonapi.parameters import ParameterSet,", "assert create_fileselection_click_recorder.call_count == 2 assert \"that look like DICOM\" in result.output def test_cli_map_delete(mock_map_context_with_mapping):", "= runner.invoke(delete) assert result.exit_code == 1 assert \"No such file or directory\" in", "test_cli_map_delete(mock_map_context_with_mapping): \"\"\"Running map info should give you a nice print of contents\"\"\" context", "accessing a specific parameter in a row easier. Not like below. 
assert (", "delete, edit, find_dicom_files, add_study_folders, init, status, ) from anonapi.mapper import ( DEFAULT_MAPPING_NAME, Mapping,", "# but after init there should be a valid mapping runner.invoke(entrypoint.cli, \"map init\",", "mock_context: MapCommandContext): super().__init__(mock_context=mock_context) def get_current_mapping(self) -> Mapping: return self.mock_context.get_current_mapping() @fixture def mock_main_runner_with_mapping(mock_main_runner, a_folder_with_mapping):", "y] ) def test_cli_map(mock_main_runner, mock_cli_base_context, tmpdir): result = mock_main_runner.invoke(entrypoint.cli, \"map init\", catch_exceptions=False) with", "but no active # after activating, active mapping should be set runner.invoke(activate) assert", "for x in added] assert pseudo_names[1].value == \"studyA\" assert pseudo_names[2].value == \"studyB\" def", "settings.active_mapping_file == context.current_dir / \"anon_mapping.csv\" # Graceful error when activating when there is", "file should yield a nice 'no mapping' message \"\"\" runner = mock_main_runner result", "def runner_with_mapping(mock_map_context_with_mapping) -> MappingContextRunner: \"\"\"A click CLIRunner with MapCommandContext that has a valid", "assert context.settings.active_mapping_file.exists() result = runner.invoke(delete, catch_exceptions=False) assert result.exit_code == 0 assert not context.settings.active_mapping_file.exists()", "monkeypatch ): \"\"\"Add an xls file containing several paths and potentially pseudonyms to", "= AnonAPIContextRunner(mock_context=context) settings = context.settings settings.active_mapping_file = None # we start with a", "a nice 'no mapping' message \"\"\" result = runner_without_mapping.invoke(status, catch_exceptions=False) assert result.exit_code ==", "print of contents\"\"\" context = mock_map_context_with_mapping runner = AnonAPIContextRunner(mock_context=context) assert context.settings.active_mapping_file.exists() result =", "TODO: make accessing a specific 
parameter in a row easier. Not like below.", "folder at this point assert not selection_folder.has_file_selection() # but after adding result =", "add something from the directory with some dicom files context.current_dir = folder_with_some_dicom_files.path monkeypatch.setattr(\"os.getcwd\",", "# by default there are no rows mapping assert len(context.get_current_mapping().grid) == 0 #", "mock_map_context_without(tmpdir) -> MapCommandContext: return MapCommandContext(current_dir=tmpdir, settings=DefaultAnonClientSettings(),) @fixture def runner_without_mapping(tmpdir): \"\"\"A click CLIRunner that", "assert result.exit_code == 1 assert \"No active mapping\" in result.output def test_cli_map_info_load_exception(mock_main_runner, monkeypatch):", "x in folder_with_some_dicom_files.path.glob(\"*\") if not x.is_file() ] # First run with regular command", "runner_with_mapping(mock_map_context_with_mapping) -> MappingContextRunner: \"\"\"A click CLIRunner with MapCommandContext that has a valid active", "\"anon_mapping.csv\" # Graceful error when activating when there is no mapping in current", ") context.settings.active_mapping_file = a_folder_with_mapping / \"anon_mapping.csv\" return mock_main_runner @fixture def mock_map_context_with_mapping(a_folder_with_mapping) -> MapCommandContext:", "# There should be a selection there assert result.exit_code == 0 assert selection_folder.has_file_selection()", ") def test_cli_map_add_selection( runner_with_mapping, a_folder_with_mapping_and_fileselection ): \"\"\"Add a file selection to a mapping.\"\"\"", "SourceIdentifierParameter from anonapi.settings import DefaultAnonClientSettings from tests.conftest import AnonAPIContextRunner, MockContextCliRunner from tests import", "monkeypatch.setattr(\"anonapi.mapper.JobParameterGrid.load\", mock_load) runner = CliRunner() result = runner.invoke(entrypoint.cli, \"map status\", catch_exceptions=False) assert result.exit_code", "a nice print of contents\"\"\" context = 
mock_main_runner_with_mapping.get_context() context.current_dir = RESOURCE_PATH / \"test_cli\"", "= runner.invoke(entrypoint.cli, \"map status\", catch_exceptions=False) assert result.exit_code == 1 assert \"Test Exception\" in", "catch_exceptions=False, ) # There should be a selection there assert result.exit_code == 0", "the directory with some dicom files context.current_dir = folder_with_some_dicom_files.path monkeypatch.setattr(\"os.getcwd\", lambda: folder_with_some_dicom_files.path) folders", "is found context = mock_main_runner.get_context() context.settings.active_mapping_file = ( RESOURCE_PATH / \"test_cli\" / \"anon_mapping.csv\"", "function, but track calls\"\"\" recorder(*args, **kwargs) return find_dicom_files(*args, **kwargs) monkeypatch.setattr( \"anonapi.cli.map_commands.find_dicom_files\", find_dicom_files_recorded, )", "catch_exceptions=False ) assert result.exit_code == 0 # Then run with input file input", "\"\"\"Add some accession numbers to a mapping\"\"\" input_file_path = MAPPER_RESOURCE_PATH / \"inputfile\" /", "for x in folder_with_some_dicom_files.path.glob(\"*\") if not x.is_file() ] # First run with regular", "AnonAPIContextRunner, MockContextCliRunner from tests import RESOURCE_PATH MAPPER_RESOURCE_PATH = RESOURCE_PATH / \"test_mapper\" class MappingContextRunner(AnonAPIContextRunner):", "RESOURCE_PATH / \"test_mapper\" class MappingContextRunner(AnonAPIContextRunner): \"\"\"A click runner that always injects a MapCommandContext", "test_cli_map_info(mock_main_runner_with_mapping): \"\"\"Running map info should give you a nice print of contents\"\"\" context", "recorder(*args, **kwargs) return find_dicom_files(*args, **kwargs) monkeypatch.setattr( \"anonapi.cli.map_commands.find_dicom_files\", find_dicom_files_recorded, ) return recorder def test_cli_map_add_study_folders(", "= runner.invoke( add_study_folders, args=[\"-f\", str(input_file_path)], catch_exceptions=False ) assert result.exit_code == 0 # now", "a_folder_with_mapping): 
context = mock_main_runner.get_context() context.current_dir = lambda: NotImplementedError( \"Call settings.active_mapping_file instead\" ) context.settings.active_mapping_file", "as f: f.read() assert result.exit_code == 0 def test_cli_map_init(mock_main_runner, tmpdir): runner = mock_main_runner", "mapping\" in result.output def test_cli_map_info_load_exception(mock_main_runner, monkeypatch): \"\"\"Running info with a corrupt mapping file" ]
[ "<filename>Sets/set .add().py amountInputs = int(input()) countryList = set() for i in range(amountInputs): countryList.add(input())", ".add().py amountInputs = int(input()) countryList = set() for i in range(amountInputs): countryList.add(input()) print(len(countryList))" ]
[ "Users Author: <NAME> ''' from django.conf.urls import url from . import views #", "<NAME> ''' from django.conf.urls import url from . import views # Authentiction urls", "urls urlpatterns = [ url(r'^login/', views._login), url(r'^signup/', views._register), url(r'^change_password/', views._changePassword), url(r'^logout/', views._logout), url(r'^upload/',", "views # Authentiction urls urlpatterns = [ url(r'^login/', views._login), url(r'^signup/', views._register), url(r'^change_password/', views._changePassword),", "''' Authentication urls for ToDos Users Author: <NAME> ''' from django.conf.urls import url", "= [ url(r'^login/', views._login), url(r'^signup/', views._register), url(r'^change_password/', views._changePassword), url(r'^logout/', views._logout), url(r'^upload/', views._upload), url(r'^profile/',", ". import views # Authentiction urls urlpatterns = [ url(r'^login/', views._login), url(r'^signup/', views._register),", "views._changePassword), url(r'^logout/', views._logout), url(r'^upload/', views._upload), url(r'^profile/', views._profile), # url(r'^activate/(?P<id>[0-9]+)/(?P<token>[-\\w]+)', views._activate), # url(r'^resend_activation_email/(?P<id>[0-9]+)', views.resend_activation_email),", "from . import views # Authentiction urls urlpatterns = [ url(r'^login/', views._login), url(r'^signup/',", "ToDos Users Author: <NAME> ''' from django.conf.urls import url from . 
import views", "Authentiction urls urlpatterns = [ url(r'^login/', views._login), url(r'^signup/', views._register), url(r'^change_password/', views._changePassword), url(r'^logout/', views._logout),", "url(r'^signup/', views._register), url(r'^change_password/', views._changePassword), url(r'^logout/', views._logout), url(r'^upload/', views._upload), url(r'^profile/', views._profile), # url(r'^activate/(?P<id>[0-9]+)/(?P<token>[-\\w]+)', views._activate),", "import views # Authentiction urls urlpatterns = [ url(r'^login/', views._login), url(r'^signup/', views._register), url(r'^change_password/',", "[ url(r'^login/', views._login), url(r'^signup/', views._register), url(r'^change_password/', views._changePassword), url(r'^logout/', views._logout), url(r'^upload/', views._upload), url(r'^profile/', views._profile),", "url(r'^logout/', views._logout), url(r'^upload/', views._upload), url(r'^profile/', views._profile), # url(r'^activate/(?P<id>[0-9]+)/(?P<token>[-\\w]+)', views._activate), # url(r'^resend_activation_email/(?P<id>[0-9]+)', views.resend_activation_email), ]", "urls for ToDos Users Author: <NAME> ''' from django.conf.urls import url from .", "# Authentiction urls urlpatterns = [ url(r'^login/', views._login), url(r'^signup/', views._register), url(r'^change_password/', views._changePassword), url(r'^logout/',", "django.conf.urls import url from . import views # Authentiction urls urlpatterns = [", "url from . import views # Authentiction urls urlpatterns = [ url(r'^login/', views._login),", "url(r'^login/', views._login), url(r'^signup/', views._register), url(r'^change_password/', views._changePassword), url(r'^logout/', views._logout), url(r'^upload/', views._upload), url(r'^profile/', views._profile), #", "from django.conf.urls import url from . 
import views # Authentiction urls urlpatterns =", "views._login), url(r'^signup/', views._register), url(r'^change_password/', views._changePassword), url(r'^logout/', views._logout), url(r'^upload/', views._upload), url(r'^profile/', views._profile), # url(r'^activate/(?P<id>[0-9]+)/(?P<token>[-\\w]+)',", "Author: <NAME> ''' from django.conf.urls import url from . import views # Authentiction", "views._register), url(r'^change_password/', views._changePassword), url(r'^logout/', views._logout), url(r'^upload/', views._upload), url(r'^profile/', views._profile), # url(r'^activate/(?P<id>[0-9]+)/(?P<token>[-\\w]+)', views._activate), #", "url(r'^change_password/', views._changePassword), url(r'^logout/', views._logout), url(r'^upload/', views._upload), url(r'^profile/', views._profile), # url(r'^activate/(?P<id>[0-9]+)/(?P<token>[-\\w]+)', views._activate), # url(r'^resend_activation_email/(?P<id>[0-9]+)',", "Authentication urls for ToDos Users Author: <NAME> ''' from django.conf.urls import url from", "import url from . import views # Authentiction urls urlpatterns = [ url(r'^login/',", "urlpatterns = [ url(r'^login/', views._login), url(r'^signup/', views._register), url(r'^change_password/', views._changePassword), url(r'^logout/', views._logout), url(r'^upload/', views._upload),", "for ToDos Users Author: <NAME> ''' from django.conf.urls import url from . import", "''' from django.conf.urls import url from . import views # Authentiction urls urlpatterns" ]
[ "tasks: List[str]) -> Fetcher: fetcher = Fetcher() uniprot_fetcher = UniprotFetcher() for task in", "UniprotFetcher() for task in tasks: if task.startswith(\"sequence\"): uniprot_fetcher.register(SequenceFetcher) if task.startswith(\"subcellular_localization\"): uniprot_fetcher.register(LocalizationFetcher) fetcher.register(uniprot_fetcher) return", "requests all the data relevant to `tasks` in a single call to its", "data fetcher for the given `tasks`. A `Fetcher` is returned which requests all", "IDs present. This file can be created by running the `get_protein_ids.py` file in", "the `get_protein_ids.py` file in `scripts`. \"\"\" def __new__(cls, tasks: List[str]) -> Fetcher: fetcher", "once, rather than for every task individually. NOTE: It is assumed that a", "returned which requests all the data relevant to `tasks` in a single call", "in `scripts`. \"\"\" def __new__(cls, tasks: List[str]) -> Fetcher: fetcher = Fetcher() uniprot_fetcher", "returns the correct data fetcher for the given `tasks`. A `Fetcher` is returned", "present. This file can be created by running the `get_protein_ids.py` file in `scripts`.", "\"\"\"A factory function which returns the correct data fetcher for the given `tasks`.", "to `tasks` in a single call to its `fetch` method. This ensures the", "This ensures the API endpoints are only queried once, rather than for every", "NOTE: It is assumed that a `benchmark.json` file already exists, with at least", "file already exists, with at least the gene IDs present. This file can", "task individually. NOTE: It is assumed that a `benchmark.json` file already exists, with", "its `fetch` method. This ensures the API endpoints are only queried once, rather", "rather than for every task individually. 
NOTE: It is assumed that a `benchmark.json`", "Fetcher, LocalizationFetcher, SequenceFetcher, UniprotFetcher class AutoFetcher: \"\"\"A factory function which returns the correct", "ensures the API endpoints are only queried once, rather than for every task", "assumed that a `benchmark.json` file already exists, with at least the gene IDs", "single call to its `fetch` method. This ensures the API endpoints are only", "SequenceFetcher, UniprotFetcher class AutoFetcher: \"\"\"A factory function which returns the correct data fetcher", "factory function which returns the correct data fetcher for the given `tasks`. A", "queried once, rather than for every task individually. NOTE: It is assumed that", "a `benchmark.json` file already exists, with at least the gene IDs present. This", "to its `fetch` method. This ensures the API endpoints are only queried once,", "`Fetcher` is returned which requests all the data relevant to `tasks` in a", "than for every task individually. NOTE: It is assumed that a `benchmark.json` file", "file in `scripts`. \"\"\" def __new__(cls, tasks: List[str]) -> Fetcher: fetcher = Fetcher()", "the correct data fetcher for the given `tasks`. A `Fetcher` is returned which", "Fetcher() uniprot_fetcher = UniprotFetcher() for task in tasks: if task.startswith(\"sequence\"): uniprot_fetcher.register(SequenceFetcher) if task.startswith(\"subcellular_localization\"):", "LocalizationFetcher, SequenceFetcher, UniprotFetcher class AutoFetcher: \"\"\"A factory function which returns the correct data", "from typing import List from geneeval.fetcher.fetchers import Fetcher, LocalizationFetcher, SequenceFetcher, UniprotFetcher class AutoFetcher:", "-> Fetcher: fetcher = Fetcher() uniprot_fetcher = UniprotFetcher() for task in tasks: if", "\"\"\" def __new__(cls, tasks: List[str]) -> Fetcher: fetcher = Fetcher() uniprot_fetcher = UniprotFetcher()", "in a single call to its `fetch` method. This ensures the API endpoints", "`scripts`. 
\"\"\" def __new__(cls, tasks: List[str]) -> Fetcher: fetcher = Fetcher() uniprot_fetcher =", "class AutoFetcher: \"\"\"A factory function which returns the correct data fetcher for the", "import List from geneeval.fetcher.fetchers import Fetcher, LocalizationFetcher, SequenceFetcher, UniprotFetcher class AutoFetcher: \"\"\"A factory", "can be created by running the `get_protein_ids.py` file in `scripts`. \"\"\" def __new__(cls,", "__new__(cls, tasks: List[str]) -> Fetcher: fetcher = Fetcher() uniprot_fetcher = UniprotFetcher() for task", "from geneeval.fetcher.fetchers import Fetcher, LocalizationFetcher, SequenceFetcher, UniprotFetcher class AutoFetcher: \"\"\"A factory function which", "endpoints are only queried once, rather than for every task individually. NOTE: It", "for every task individually. NOTE: It is assumed that a `benchmark.json` file already", "gene IDs present. This file can be created by running the `get_protein_ids.py` file", "is returned which requests all the data relevant to `tasks` in a single", "method. This ensures the API endpoints are only queried once, rather than for", "= Fetcher() uniprot_fetcher = UniprotFetcher() for task in tasks: if task.startswith(\"sequence\"): uniprot_fetcher.register(SequenceFetcher) if", "UniprotFetcher class AutoFetcher: \"\"\"A factory function which returns the correct data fetcher for", "data relevant to `tasks` in a single call to its `fetch` method. This", "by running the `get_protein_ids.py` file in `scripts`. \"\"\" def __new__(cls, tasks: List[str]) ->", "fetcher for the given `tasks`. A `Fetcher` is returned which requests all the", "all the data relevant to `tasks` in a single call to its `fetch`", "is assumed that a `benchmark.json` file already exists, with at least the gene", "already exists, with at least the gene IDs present. This file can be", "This file can be created by running the `get_protein_ids.py` file in `scripts`. 
\"\"\"", "fetcher = Fetcher() uniprot_fetcher = UniprotFetcher() for task in tasks: if task.startswith(\"sequence\"): uniprot_fetcher.register(SequenceFetcher)", "file can be created by running the `get_protein_ids.py` file in `scripts`. \"\"\" def", "the gene IDs present. This file can be created by running the `get_protein_ids.py`", "the API endpoints are only queried once, rather than for every task individually.", "every task individually. NOTE: It is assumed that a `benchmark.json` file already exists,", "the data relevant to `tasks` in a single call to its `fetch` method.", "def __new__(cls, tasks: List[str]) -> Fetcher: fetcher = Fetcher() uniprot_fetcher = UniprotFetcher() for", "AutoFetcher: \"\"\"A factory function which returns the correct data fetcher for the given", "It is assumed that a `benchmark.json` file already exists, with at least the", "typing import List from geneeval.fetcher.fetchers import Fetcher, LocalizationFetcher, SequenceFetcher, UniprotFetcher class AutoFetcher: \"\"\"A", "uniprot_fetcher = UniprotFetcher() for task in tasks: if task.startswith(\"sequence\"): uniprot_fetcher.register(SequenceFetcher) if task.startswith(\"subcellular_localization\"): uniprot_fetcher.register(LocalizationFetcher)", "import Fetcher, LocalizationFetcher, SequenceFetcher, UniprotFetcher class AutoFetcher: \"\"\"A factory function which returns the", "correct data fetcher for the given `tasks`. A `Fetcher` is returned which requests", "relevant to `tasks` in a single call to its `fetch` method. This ensures", "`fetch` method. This ensures the API endpoints are only queried once, rather than", "only queried once, rather than for every task individually. NOTE: It is assumed", "which requests all the data relevant to `tasks` in a single call to", "A `Fetcher` is returned which requests all the data relevant to `tasks` in", "the given `tasks`. A `Fetcher` is returned which requests all the data relevant", "least the gene IDs present. 
This file can be created by running the", "exists, with at least the gene IDs present. This file can be created", "given `tasks`. A `Fetcher` is returned which requests all the data relevant to", "`tasks` in a single call to its `fetch` method. This ensures the API", "running the `get_protein_ids.py` file in `scripts`. \"\"\" def __new__(cls, tasks: List[str]) -> Fetcher:", "Fetcher: fetcher = Fetcher() uniprot_fetcher = UniprotFetcher() for task in tasks: if task.startswith(\"sequence\"):", "geneeval.fetcher.fetchers import Fetcher, LocalizationFetcher, SequenceFetcher, UniprotFetcher class AutoFetcher: \"\"\"A factory function which returns", "with at least the gene IDs present. This file can be created by", "function which returns the correct data fetcher for the given `tasks`. A `Fetcher`", "be created by running the `get_protein_ids.py` file in `scripts`. \"\"\" def __new__(cls, tasks:", "for the given `tasks`. A `Fetcher` is returned which requests all the data", "API endpoints are only queried once, rather than for every task individually. NOTE:", "= UniprotFetcher() for task in tasks: if task.startswith(\"sequence\"): uniprot_fetcher.register(SequenceFetcher) if task.startswith(\"subcellular_localization\"): uniprot_fetcher.register(LocalizationFetcher) fetcher.register(uniprot_fetcher)", "are only queried once, rather than for every task individually. NOTE: It is", "call to its `fetch` method. This ensures the API endpoints are only queried", "`tasks`. A `Fetcher` is returned which requests all the data relevant to `tasks`", "a single call to its `fetch` method. This ensures the API endpoints are", "at least the gene IDs present. This file can be created by running", "`benchmark.json` file already exists, with at least the gene IDs present. This file", "which returns the correct data fetcher for the given `tasks`. A `Fetcher` is", "`get_protein_ids.py` file in `scripts`. 
\"\"\" def __new__(cls, tasks: List[str]) -> Fetcher: fetcher =", "List[str]) -> Fetcher: fetcher = Fetcher() uniprot_fetcher = UniprotFetcher() for task in tasks:", "created by running the `get_protein_ids.py` file in `scripts`. \"\"\" def __new__(cls, tasks: List[str])", "for task in tasks: if task.startswith(\"sequence\"): uniprot_fetcher.register(SequenceFetcher) if task.startswith(\"subcellular_localization\"): uniprot_fetcher.register(LocalizationFetcher) fetcher.register(uniprot_fetcher) return fetcher", "individually. NOTE: It is assumed that a `benchmark.json` file already exists, with at", "List from geneeval.fetcher.fetchers import Fetcher, LocalizationFetcher, SequenceFetcher, UniprotFetcher class AutoFetcher: \"\"\"A factory function", "that a `benchmark.json` file already exists, with at least the gene IDs present." ]
[ "MASTER_KEY = os.getenv('COSMOSDB_MASTER_KEY'), DATABASE_ID = os.getenv('COSMOSDB_DATABASE_ID') ) class DataBaseClient(): def __init__(self, container_id, partition_key)", "create_item_cosmosdb(self, item: Dict) -> Dict: container = self.get_cosmosdb_container() item = container.create_item(item) return item", "SETTINGS = dict( HOST = os.getenv('COSMOSDB_HOST'), MASTER_KEY = os.getenv('COSMOSDB_MASTER_KEY'), DATABASE_ID = os.getenv('COSMOSDB_DATABASE_ID') )", "database def get_cosmosdb_container(self) -> ContainerProxy: database = self.get_cosmosdb_database() container = database.create_container_if_not_exists( id=self.container_id, partition_key=PartitionKey(path=self.partition_key)", "ContainerProxy: database = self.get_cosmosdb_database() container = database.create_container_if_not_exists( id=self.container_id, partition_key=PartitionKey(path=self.partition_key) ) return container def", "-> DatabaseProxy: client = self.get_cosmosdb_client() database = client.create_database_if_not_exists(SETTINGS['DATABASE_ID']) return database def get_cosmosdb_container(self) ->", "os, json from typing import Dict, Iterable from azure.cosmos import (CosmosClient, PartitionKey, ContainerProxy,", "= dict( HOST = os.getenv('COSMOSDB_HOST'), MASTER_KEY = os.getenv('COSMOSDB_MASTER_KEY'), DATABASE_ID = os.getenv('COSMOSDB_DATABASE_ID') ) class", "= os.getenv('COSMOSDB_MASTER_KEY'), DATABASE_ID = os.getenv('COSMOSDB_DATABASE_ID') ) class DataBaseClient(): def __init__(self, container_id, partition_key) ->", "id=self.container_id, partition_key=PartitionKey(path=self.partition_key) ) return container def create_item_cosmosdb(self, item: Dict) -> Dict: container =", "SETTINGS['MASTER_KEY']} ) return client def get_cosmosdb_database(self) -> DatabaseProxy: client = self.get_cosmosdb_client() database =", ") class DataBaseClient(): def __init__(self, container_id, partition_key) -> None: super().__init__() self.container_id = container_id", "client = self.get_cosmosdb_client() 
database = client.create_database_if_not_exists(SETTINGS['DATABASE_ID']) return database def get_cosmosdb_container(self) -> ContainerProxy: database", "return database def get_cosmosdb_container(self) -> ContainerProxy: database = self.get_cosmosdb_database() container = database.create_container_if_not_exists( id=self.container_id,", "self.get_cosmosdb_container() item = container.delete_item(item) return item def get_item_cosmosdb(self, item: Dict) -> Dict: container", "partition_key=PartitionKey(path=self.partition_key) ) return container def create_item_cosmosdb(self, item: Dict) -> Dict: container = self.get_cosmosdb_container()", "def upsert_item_cosmosdb(self, item: Dict) -> Dict: container = self.get_cosmosdb_container() item = container.upsert_item(item) return", "(CosmosClient, PartitionKey, ContainerProxy, DatabaseProxy) SETTINGS = dict( HOST = os.getenv('COSMOSDB_HOST'), MASTER_KEY = os.getenv('COSMOSDB_MASTER_KEY'),", "upsert_item_cosmosdb(self, item: Dict) -> Dict: container = self.get_cosmosdb_container() item = container.upsert_item(item) return item", "import (CosmosClient, PartitionKey, ContainerProxy, DatabaseProxy) SETTINGS = dict( HOST = os.getenv('COSMOSDB_HOST'), MASTER_KEY =", "database = self.get_cosmosdb_database() container = database.create_container_if_not_exists( id=self.container_id, partition_key=PartitionKey(path=self.partition_key) ) return container def create_item_cosmosdb(self,", "Dict) -> Dict: container = self.get_cosmosdb_container() item = container.read_item(item) return item def query_items_cosmosdb(self,", "= self.get_cosmosdb_container() item = container.create_item(item) return item def upsert_item_cosmosdb(self, item: Dict) -> Dict:", "def query_items_cosmosdb(self, query: str) -> Iterable: container = self.get_cosmosdb_container() items = container.query_items(query, enable_cross_partition_query=True)", ") return container def create_item_cosmosdb(self, item: Dict) -> Dict: container = self.get_cosmosdb_container() 
item", "from typing import Dict, Iterable from azure.cosmos import (CosmosClient, PartitionKey, ContainerProxy, DatabaseProxy) SETTINGS", "self.container_id = container_id self.partition_key = partition_key def get_cosmosdb_client(self) -> CosmosClient: client = CosmosClient(", "Dict: container = self.get_cosmosdb_container() item = container.delete_item(item) return item def get_item_cosmosdb(self, item: Dict)", "get_cosmosdb_container(self) -> ContainerProxy: database = self.get_cosmosdb_database() container = database.create_container_if_not_exists( id=self.container_id, partition_key=PartitionKey(path=self.partition_key) ) return", "Dict: container = self.get_cosmosdb_container() item = container.create_item(item) return item def upsert_item_cosmosdb(self, item: Dict)", "database = client.create_database_if_not_exists(SETTINGS['DATABASE_ID']) return database def get_cosmosdb_container(self) -> ContainerProxy: database = self.get_cosmosdb_database() container", "= client.create_database_if_not_exists(SETTINGS['DATABASE_ID']) return database def get_cosmosdb_container(self) -> ContainerProxy: database = self.get_cosmosdb_database() container =", "partition_key) -> None: super().__init__() self.container_id = container_id self.partition_key = partition_key def get_cosmosdb_client(self) ->", "item = container.create_item(item) return item def upsert_item_cosmosdb(self, item: Dict) -> Dict: container =", "auth={'masterKey': SETTINGS['MASTER_KEY']} ) return client def get_cosmosdb_database(self) -> DatabaseProxy: client = self.get_cosmosdb_client() database", "database.create_container_if_not_exists( id=self.container_id, partition_key=PartitionKey(path=self.partition_key) ) return container def create_item_cosmosdb(self, item: Dict) -> Dict: container", "os.getenv('COSMOSDB_DATABASE_ID') ) class DataBaseClient(): def __init__(self, container_id, partition_key) -> None: super().__init__() self.container_id =", "Dict: container = self.get_cosmosdb_container() item = 
container.read_item(item) return item def query_items_cosmosdb(self, query: str)", "os.getenv('COSMOSDB_HOST'), MASTER_KEY = os.getenv('COSMOSDB_MASTER_KEY'), DATABASE_ID = os.getenv('COSMOSDB_DATABASE_ID') ) class DataBaseClient(): def __init__(self, container_id,", "endpoint_url=SETTINGS['HOST'], auth={'masterKey': SETTINGS['MASTER_KEY']} ) return client def get_cosmosdb_database(self) -> DatabaseProxy: client = self.get_cosmosdb_client()", "client.create_database_if_not_exists(SETTINGS['DATABASE_ID']) return database def get_cosmosdb_container(self) -> ContainerProxy: database = self.get_cosmosdb_database() container = database.create_container_if_not_exists(", "DatabaseProxy: client = self.get_cosmosdb_client() database = client.create_database_if_not_exists(SETTINGS['DATABASE_ID']) return database def get_cosmosdb_container(self) -> ContainerProxy:", "container.upsert_item(item) return item def delete_item_cosmosdb(self, item: Dict) -> Dict: container = self.get_cosmosdb_container() item", "container = self.get_cosmosdb_container() item = container.delete_item(item) return item def get_item_cosmosdb(self, item: Dict) ->", "item = container.delete_item(item) return item def get_item_cosmosdb(self, item: Dict) -> Dict: container =", "ContainerProxy, DatabaseProxy) SETTINGS = dict( HOST = os.getenv('COSMOSDB_HOST'), MASTER_KEY = os.getenv('COSMOSDB_MASTER_KEY'), DATABASE_ID =", "def get_item_cosmosdb(self, item: Dict) -> Dict: container = self.get_cosmosdb_container() item = container.read_item(item) return", "= os.getenv('COSMOSDB_DATABASE_ID') ) class DataBaseClient(): def __init__(self, container_id, partition_key) -> None: super().__init__() self.container_id", "partition_key def get_cosmosdb_client(self) -> CosmosClient: client = CosmosClient( endpoint_url=SETTINGS['HOST'], auth={'masterKey': SETTINGS['MASTER_KEY']} ) return", "= partition_key def get_cosmosdb_client(self) -> CosmosClient: client = CosmosClient( endpoint_url=SETTINGS['HOST'], 
auth={'masterKey': SETTINGS['MASTER_KEY']} )", "Dict) -> Dict: container = self.get_cosmosdb_container() item = container.delete_item(item) return item def get_item_cosmosdb(self,", "self.get_cosmosdb_container() item = container.upsert_item(item) return item def delete_item_cosmosdb(self, item: Dict) -> Dict: container", "container_id, partition_key) -> None: super().__init__() self.container_id = container_id self.partition_key = partition_key def get_cosmosdb_client(self)", "query_items_cosmosdb(self, query: str) -> Iterable: container = self.get_cosmosdb_container() items = container.query_items(query, enable_cross_partition_query=True) return", "azure.cosmos import (CosmosClient, PartitionKey, ContainerProxy, DatabaseProxy) SETTINGS = dict( HOST = os.getenv('COSMOSDB_HOST'), MASTER_KEY", "= self.get_cosmosdb_container() item = container.delete_item(item) return item def get_item_cosmosdb(self, item: Dict) -> Dict:", "return item def upsert_item_cosmosdb(self, item: Dict) -> Dict: container = self.get_cosmosdb_container() item =", "dict( HOST = os.getenv('COSMOSDB_HOST'), MASTER_KEY = os.getenv('COSMOSDB_MASTER_KEY'), DATABASE_ID = os.getenv('COSMOSDB_DATABASE_ID') ) class DataBaseClient():", "= container_id self.partition_key = partition_key def get_cosmosdb_client(self) -> CosmosClient: client = CosmosClient( endpoint_url=SETTINGS['HOST'],", "self.get_cosmosdb_database() container = database.create_container_if_not_exists( id=self.container_id, partition_key=PartitionKey(path=self.partition_key) ) return container def create_item_cosmosdb(self, item: Dict)", "container def create_item_cosmosdb(self, item: Dict) -> Dict: container = self.get_cosmosdb_container() item = container.create_item(item)", "container_id self.partition_key = partition_key def get_cosmosdb_client(self) -> CosmosClient: client = CosmosClient( endpoint_url=SETTINGS['HOST'], auth={'masterKey':", "return item def get_item_cosmosdb(self, item: Dict) -> Dict: container = 
self.get_cosmosdb_container() item =", "def get_cosmosdb_container(self) -> ContainerProxy: database = self.get_cosmosdb_database() container = database.create_container_if_not_exists( id=self.container_id, partition_key=PartitionKey(path=self.partition_key) )", "def __init__(self, container_id, partition_key) -> None: super().__init__() self.container_id = container_id self.partition_key = partition_key", "def get_cosmosdb_client(self) -> CosmosClient: client = CosmosClient( endpoint_url=SETTINGS['HOST'], auth={'masterKey': SETTINGS['MASTER_KEY']} ) return client", "item: Dict) -> Dict: container = self.get_cosmosdb_container() item = container.create_item(item) return item def", "= container.delete_item(item) return item def get_item_cosmosdb(self, item: Dict) -> Dict: container = self.get_cosmosdb_container()", "query: str) -> Iterable: container = self.get_cosmosdb_container() items = container.query_items(query, enable_cross_partition_query=True) return items", "self.get_cosmosdb_container() item = container.read_item(item) return item def query_items_cosmosdb(self, query: str) -> Iterable: container", "HOST = os.getenv('COSMOSDB_HOST'), MASTER_KEY = os.getenv('COSMOSDB_MASTER_KEY'), DATABASE_ID = os.getenv('COSMOSDB_DATABASE_ID') ) class DataBaseClient(): def", "= database.create_container_if_not_exists( id=self.container_id, partition_key=PartitionKey(path=self.partition_key) ) return container def create_item_cosmosdb(self, item: Dict) -> Dict:", "delete_item_cosmosdb(self, item: Dict) -> Dict: container = self.get_cosmosdb_container() item = container.delete_item(item) return item", "Dict, Iterable from azure.cosmos import (CosmosClient, PartitionKey, ContainerProxy, DatabaseProxy) SETTINGS = dict( HOST", "class DataBaseClient(): def __init__(self, container_id, partition_key) -> None: super().__init__() self.container_id = container_id self.partition_key", "-> Dict: container = self.get_cosmosdb_container() item = container.delete_item(item) return item 
def get_item_cosmosdb(self, item:", "Dict: container = self.get_cosmosdb_container() item = container.upsert_item(item) return item def delete_item_cosmosdb(self, item: Dict)", "return container def create_item_cosmosdb(self, item: Dict) -> Dict: container = self.get_cosmosdb_container() item =", "= os.getenv('COSMOSDB_HOST'), MASTER_KEY = os.getenv('COSMOSDB_MASTER_KEY'), DATABASE_ID = os.getenv('COSMOSDB_DATABASE_ID') ) class DataBaseClient(): def __init__(self,", "= self.get_cosmosdb_container() item = container.read_item(item) return item def query_items_cosmosdb(self, query: str) -> Iterable:", "client = CosmosClient( endpoint_url=SETTINGS['HOST'], auth={'masterKey': SETTINGS['MASTER_KEY']} ) return client def get_cosmosdb_database(self) -> DatabaseProxy:", "typing import Dict, Iterable from azure.cosmos import (CosmosClient, PartitionKey, ContainerProxy, DatabaseProxy) SETTINGS =", "os.getenv('COSMOSDB_MASTER_KEY'), DATABASE_ID = os.getenv('COSMOSDB_DATABASE_ID') ) class DataBaseClient(): def __init__(self, container_id, partition_key) -> None:", "item def get_item_cosmosdb(self, item: Dict) -> Dict: container = self.get_cosmosdb_container() item = container.read_item(item)", "item = container.read_item(item) return item def query_items_cosmosdb(self, query: str) -> Iterable: container =", "= container.create_item(item) return item def upsert_item_cosmosdb(self, item: Dict) -> Dict: container = self.get_cosmosdb_container()", "-> CosmosClient: client = CosmosClient( endpoint_url=SETTINGS['HOST'], auth={'masterKey': SETTINGS['MASTER_KEY']} ) return client def get_cosmosdb_database(self)", "container = database.create_container_if_not_exists( id=self.container_id, partition_key=PartitionKey(path=self.partition_key) ) return container def create_item_cosmosdb(self, item: Dict) ->", "item def query_items_cosmosdb(self, query: str) -> Iterable: container = self.get_cosmosdb_container() items = container.query_items(query,", "item: Dict) -> Dict: container = 
self.get_cosmosdb_container() item = container.delete_item(item) return item def", "DATABASE_ID = os.getenv('COSMOSDB_DATABASE_ID') ) class DataBaseClient(): def __init__(self, container_id, partition_key) -> None: super().__init__()", "super().__init__() self.container_id = container_id self.partition_key = partition_key def get_cosmosdb_client(self) -> CosmosClient: client =", "-> ContainerProxy: database = self.get_cosmosdb_database() container = database.create_container_if_not_exists( id=self.container_id, partition_key=PartitionKey(path=self.partition_key) ) return container", "def get_cosmosdb_database(self) -> DatabaseProxy: client = self.get_cosmosdb_client() database = client.create_database_if_not_exists(SETTINGS['DATABASE_ID']) return database def", "item = container.upsert_item(item) return item def delete_item_cosmosdb(self, item: Dict) -> Dict: container =", "container.create_item(item) return item def upsert_item_cosmosdb(self, item: Dict) -> Dict: container = self.get_cosmosdb_container() item", "return item def delete_item_cosmosdb(self, item: Dict) -> Dict: container = self.get_cosmosdb_container() item =", "DatabaseProxy) SETTINGS = dict( HOST = os.getenv('COSMOSDB_HOST'), MASTER_KEY = os.getenv('COSMOSDB_MASTER_KEY'), DATABASE_ID = os.getenv('COSMOSDB_DATABASE_ID')", "Dict) -> Dict: container = self.get_cosmosdb_container() item = container.upsert_item(item) return item def delete_item_cosmosdb(self,", "return client def get_cosmosdb_database(self) -> DatabaseProxy: client = self.get_cosmosdb_client() database = client.create_database_if_not_exists(SETTINGS['DATABASE_ID']) return", "def delete_item_cosmosdb(self, item: Dict) -> Dict: container = self.get_cosmosdb_container() item = container.delete_item(item) return", "client def get_cosmosdb_database(self) -> DatabaseProxy: client = self.get_cosmosdb_client() database = client.create_database_if_not_exists(SETTINGS['DATABASE_ID']) return database", "return item def query_items_cosmosdb(self, 
query: str) -> Iterable: container = self.get_cosmosdb_container() items =", "-> Dict: container = self.get_cosmosdb_container() item = container.read_item(item) return item def query_items_cosmosdb(self, query:", "-> None: super().__init__() self.container_id = container_id self.partition_key = partition_key def get_cosmosdb_client(self) -> CosmosClient:", "item def delete_item_cosmosdb(self, item: Dict) -> Dict: container = self.get_cosmosdb_container() item = container.delete_item(item)", "get_cosmosdb_client(self) -> CosmosClient: client = CosmosClient( endpoint_url=SETTINGS['HOST'], auth={'masterKey': SETTINGS['MASTER_KEY']} ) return client def", "-> Dict: container = self.get_cosmosdb_container() item = container.create_item(item) return item def upsert_item_cosmosdb(self, item:", "def create_item_cosmosdb(self, item: Dict) -> Dict: container = self.get_cosmosdb_container() item = container.create_item(item) return", "= container.read_item(item) return item def query_items_cosmosdb(self, query: str) -> Iterable: container = self.get_cosmosdb_container()", "container.delete_item(item) return item def get_item_cosmosdb(self, item: Dict) -> Dict: container = self.get_cosmosdb_container() item", ") return client def get_cosmosdb_database(self) -> DatabaseProxy: client = self.get_cosmosdb_client() database = client.create_database_if_not_exists(SETTINGS['DATABASE_ID'])", "item def upsert_item_cosmosdb(self, item: Dict) -> Dict: container = self.get_cosmosdb_container() item = container.upsert_item(item)", "item: Dict) -> Dict: container = self.get_cosmosdb_container() item = container.read_item(item) return item def", "container.read_item(item) return item def query_items_cosmosdb(self, query: str) -> Iterable: container = self.get_cosmosdb_container() items", "from azure.cosmos import (CosmosClient, PartitionKey, ContainerProxy, DatabaseProxy) SETTINGS = dict( HOST = os.getenv('COSMOSDB_HOST'),", "import Dict, Iterable from azure.cosmos import (CosmosClient, 
PartitionKey, ContainerProxy, DatabaseProxy) SETTINGS = dict(", "get_item_cosmosdb(self, item: Dict) -> Dict: container = self.get_cosmosdb_container() item = container.read_item(item) return item", "container = self.get_cosmosdb_container() item = container.create_item(item) return item def upsert_item_cosmosdb(self, item: Dict) ->", "container = self.get_cosmosdb_container() item = container.upsert_item(item) return item def delete_item_cosmosdb(self, item: Dict) ->", "self.get_cosmosdb_container() item = container.create_item(item) return item def upsert_item_cosmosdb(self, item: Dict) -> Dict: container", "Iterable from azure.cosmos import (CosmosClient, PartitionKey, ContainerProxy, DatabaseProxy) SETTINGS = dict( HOST =", "PartitionKey, ContainerProxy, DatabaseProxy) SETTINGS = dict( HOST = os.getenv('COSMOSDB_HOST'), MASTER_KEY = os.getenv('COSMOSDB_MASTER_KEY'), DATABASE_ID", "CosmosClient: client = CosmosClient( endpoint_url=SETTINGS['HOST'], auth={'masterKey': SETTINGS['MASTER_KEY']} ) return client def get_cosmosdb_database(self) ->", "DataBaseClient(): def __init__(self, container_id, partition_key) -> None: super().__init__() self.container_id = container_id self.partition_key =", "self.get_cosmosdb_client() database = client.create_database_if_not_exists(SETTINGS['DATABASE_ID']) return database def get_cosmosdb_container(self) -> ContainerProxy: database = self.get_cosmosdb_database()", "CosmosClient( endpoint_url=SETTINGS['HOST'], auth={'masterKey': SETTINGS['MASTER_KEY']} ) return client def get_cosmosdb_database(self) -> DatabaseProxy: client =", "= CosmosClient( endpoint_url=SETTINGS['HOST'], auth={'masterKey': SETTINGS['MASTER_KEY']} ) return client def get_cosmosdb_database(self) -> DatabaseProxy: client", "-> Dict: container = self.get_cosmosdb_container() item = container.upsert_item(item) return item def delete_item_cosmosdb(self, item:", "container = self.get_cosmosdb_container() item = container.read_item(item) return item def 
query_items_cosmosdb(self, query: str) ->", "json from typing import Dict, Iterable from azure.cosmos import (CosmosClient, PartitionKey, ContainerProxy, DatabaseProxy)", "= self.get_cosmosdb_client() database = client.create_database_if_not_exists(SETTINGS['DATABASE_ID']) return database def get_cosmosdb_container(self) -> ContainerProxy: database =", "__init__(self, container_id, partition_key) -> None: super().__init__() self.container_id = container_id self.partition_key = partition_key def", "self.partition_key = partition_key def get_cosmosdb_client(self) -> CosmosClient: client = CosmosClient( endpoint_url=SETTINGS['HOST'], auth={'masterKey': SETTINGS['MASTER_KEY']}", "Dict) -> Dict: container = self.get_cosmosdb_container() item = container.create_item(item) return item def upsert_item_cosmosdb(self,", "= self.get_cosmosdb_database() container = database.create_container_if_not_exists( id=self.container_id, partition_key=PartitionKey(path=self.partition_key) ) return container def create_item_cosmosdb(self, item:", "get_cosmosdb_database(self) -> DatabaseProxy: client = self.get_cosmosdb_client() database = client.create_database_if_not_exists(SETTINGS['DATABASE_ID']) return database def get_cosmosdb_container(self)", "= container.upsert_item(item) return item def delete_item_cosmosdb(self, item: Dict) -> Dict: container = self.get_cosmosdb_container()", "= self.get_cosmosdb_container() item = container.upsert_item(item) return item def delete_item_cosmosdb(self, item: Dict) -> Dict:", "None: super().__init__() self.container_id = container_id self.partition_key = partition_key def get_cosmosdb_client(self) -> CosmosClient: client", "import os, json from typing import Dict, Iterable from azure.cosmos import (CosmosClient, PartitionKey,", "item: Dict) -> Dict: container = self.get_cosmosdb_container() item = container.upsert_item(item) return item def" ]
[ "= children \"\"\" VARIABLES Methods for handling variables, the basic stores for actually", "Variable): new_vars.append(child) else: new_vars.extend(child._variables.values()) variables = list(set(new_vars)) variable_names = [var.name for var in", "given node. \"\"\" return create_computational_graph(self) def get_comp_table(self): \"\"\" Creates a computational table for", "def set_value(self, value): self._value = None if isinstance(value, np.ndarray): self.set_derivative(np.zeros(value.size)) super().set_value(value) # Iterate", "in self.children] grad_value = self._grad_value results = fn(self, values, grad_value) # Need to", "results return wrapper class Node(): \"\"\" Class Node Base Node implementation. \"\"\" def", "grad_value): numer, denom = values numer_out = np.divide(grad_value, denom) denom_out = -1*np.divide(np.multiply(grad_value,numer), np.power(denom,", "for value in values: new = value if not isinstance(new, Node): new =", "%r, Value = %r, Derivative = %r)' % (self.type, self.value(), self.derivative()) return output", "0 self._cur_grad_count = 0 self._grad_count = 0 for child in self.children: child.zero_grad_values() def", "node by 1 self.add_grad_contribution(1) self.reverse() # Now set the results self._derivative = {}", "in input_dict: raise TypeError('Input not recognized.') self.set_value(input_dict[self.name]) self.set_derivative(1); return self def __call__(self, *args,", "derivative def set_value(self, value): self._value = None if isinstance(value, np.ndarray): self.set_derivative(np.zeros(value.size)) super().set_value(value) #", "'Variable' self._variables[name] = self self.var_idx = -1 def eval(self): if self.value() is None:", "powered) # Second term term2 = 0 # if exp_prime != 0: #", "self._grad_value = 0 self._cur_grad_count = 0 self._grad_count = 0 for child in self.children:", "values. 
Inputs methods: -Dictionary of {variable_name: value, ...} -Keyword arguments of compute(variable_vame=value, ...)", "at the given variable values. Inputs methods: -Dictionary of {variable_name: value, ...} -Keyword", "# self._derivative[key] = np.zeros(value.size) self.zero_vector_derivative(input_dict) def zero_vector_derivative(self, input_dict): \"\"\" Reset vectors of derivatives", "# Compute derivatives based on mode if settings.current_mode() == \"forward\": for var in", "for clarity and to serve as a decorator factory. Note: the class implementation", "for child in self.children: child.zero_grad_values() def set_grad_count(self): \"\"\" Calculate dependency counts \"\"\" self._grad_count", "== 0: input_dict = kwargs elif len(args) == 1: input_dict = args[0] if", "and derivatives of any given node. \"\"\" def update_variables(self): \"\"\" Update current variable", "\"\"\" class Variable(Node): \"\"\" Node representing a symbolic variable. Serves as the basis", "of automatic differentiation. These include keeping track of whether or not any node", "node def __rtruediv__(self, value): node = self.make_node(Division(), value, self) return node def __pow__(self,", "def set_children(self, *children): self.children = children \"\"\" VARIABLES Methods for handling variables, the", "# Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): numer, denom = values numer_out", "to propagate results (functions need to return same # of results as children)", "\"\"\" Decorator for computation functions. Implemented as a class for clarity and to", "= 0 self._cur_grad_count = 0 self._grad_count = 0 @classmethod def make_constant(cls, value): return", "def reverse_wrapper(self, fn): \"\"\" Wrapper for updating gradients in reverse pass. 
\"\"\" @wraps(fn)", "self.set_value(input_dict[self.name]) self.set_derivative(1); return self def __call__(self, *args, **kwargs): return self.compute(*args, **kwargs) # Reverse", "derivative(self): return self._derivative def set_value(self, value): if not isinstance(value, (numbers.Number, np.ndarray)): raise TypeError('Value", "var in variables] self._variables = dict(zip(variable_names, variables)) def set_variables(self, input_dict): \"\"\" Set variables", "node derivatives. \"\"\" @wraps(fn) def wrapper(self): values = [child.eval() for child in self.children]", "values, grad_value): raise NotImplementedError def get_comp_graph(self): \"\"\" Creates a computational graph for a", "-1 def eval(self): if self.value() is None: raise NoValueError('Variable %s has been given", "Calculate dependency counts \"\"\" self._grad_count += 1 for child in self.children: child.set_grad_count() def", "set_derivative(self, value): if isinstance(self.value(), np.ndarray): self._derivative[:] = value else: self._derivative = value #", "\"\"\" Class Node Base Node implementation. \"\"\" def __init__(self): self._value = None self._derivative", "self.make_node(Negation(), self) return node def __sub__(self, value): node = self.make_node(Subtraction(), self, value) return", "for handling variables, the basic stores for actually computing the values and derivatives", "in self.children] diffs = [child.diff() for child in self.children] result = fn(self, values,", "\"\"\" REVERSE MODE Helper functions for properly doing the reverse mode of automatic", "-Dictionary of {variable_name: value, ...} -Keyword arguments of compute(variable_vame=value, ...) \"\"\" if len(args)", "for properly doing the reverse mode of automatic differentiation. 
These include keeping track", "this # consistent, but would increase computation; elegance tradeoff def set_derivative(self, value): if", "eval(self, values): left, right = values return np.add(left, right) @node_decorate('differentiate') def diff(self, values,", "self.make_node(Division(), self, value) return node def __rtruediv__(self, value): node = self.make_node(Division(), value, self)", "def __call__(self, fn): return self.wrapper(fn) def eval_wrapper(self, fn): \"\"\" Wrapper for updating node", "fn): \"\"\" Wrapper for updating node derivatives. \"\"\" @wraps(fn) def wrapper(self): values =", "\"\"\" Custom exceptions. \"\"\" class NoValueError(Exception): pass class node_decorate(): \"\"\" Decorator for computation", "self.diff_wrapper, 'reverse': self.reverse_wrapper} self.wrapper = self.factory[mode] def __call__(self, fn): return self.wrapper(fn) def eval_wrapper(self,", "@classmethod def make_constant(cls, value): return Constant(value) @classmethod def make_node(cls, node, *values): new_nodes =", "self.children: child.set_grad_count() def ready_to_reverse(self): return (self._cur_grad_count == self._grad_count) def add_grad_contribution(self, value): # Keep", "diff(self, values, diffs): return -1*np.array(diffs[0]) # Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value):", "at decoration, since we have another function layer outside now). \"\"\" def __init__(self,", "would expect in symbolic computation. \"\"\" def __call__(self, *args, **kwargs): return self.compute(*args, **kwargs)", "compute to treating them as one would expect in symbolic computation. \"\"\" def", "final output values. \"\"\" def __init__(self, name=None): super().__init__() if name is None or", "class Power(Node): def __init__(self): super().__init__() self.type = 'Power' @node_decorate('evaluate') def eval(self, values): base,", "of any given node. \"\"\" def update_variables(self): \"\"\" Update current variable list to", "outside now). 
\"\"\" def __init__(self, mode): # Maintain function metadata (doctstrings, etc.) with", "\"\"\" @wraps(fn) def wrapper(self): # Check that we've received all the dependencies we", "-Keyword arguments of compute(variable_vame=value, ...) \"\"\" if len(args) == 0: input_dict = kwargs", "node = self.make_node(Negation(), self) return node def __sub__(self, value): node = self.make_node(Subtraction(), self,", "return (left_out, right_out) class Division(Node): def __init__(self): super().__init__() self.type = 'Division' @node_decorate('evaluate') def", "implemented by subclasses. Use the node_decorate decorator to update node values upon computation.", "self # Uncomment when overriding: # @node_decorate('evaluate') def eval(self, values): raise NotImplementedError #", "return self # Uncomment when overriding: # @node_decorate('evaluate') def eval(self, values): raise NotImplementedError", "sweep before reverse, assume values exist values = [child.value() for child in self.children]", "Class Node Base Node implementation. \"\"\" def __init__(self): self._value = None self._derivative =", "self) return node def __eq__(self,other): return self.value() == other.value() and self.derivative() == other.derivative()", "*args, **kwargs): return self.compute(*args, **kwargs) # Reverse mode doesn't need to do anything,", "Constant(Node): \"\"\" Node representing a constant. Always initiated with 0 derivative. 
\"\"\" def", "self.derivative()) return output def __add__(self, value): node = self.make_node(Addition(), self, value) return node", "variables; I could keep this # consistent, but would increase computation; elegance tradeoff", "__add__(self, value): node = self.make_node(Addition(), self, value) return node def __radd__(self, value): node", "def __init__(self, value): super().__init__() self.set_value(value) self.set_derivative(0) self.type = 'Constant' def set_derivative(self, value): self._derivative", "value, ...} -Keyword arguments of compute(variable_vame=value, ...) \"\"\" if len(args) == 0: input_dict", "if name is None or not isinstance(name, str): raise ValueError('Name must be given", "value): self._derivative = value def eval(self): return self.value() def diff(self): return self.derivative() #", "__radd__(self, value): node = self.make_node(Addition(), value, self) return node def __neg__(self): node =", "as a class for clarity and to serve as a decorator factory. Note:", "child.zero_grad_values() def set_grad_count(self): \"\"\" Calculate dependency counts \"\"\" self._grad_count += 1 for child", "value): node = self.make_node(Subtraction(), value, self) return node def __mul__(self, value): node =", "the basis of evaluation, and then propagates values through the graph to the", "**kwargs): return self.compute(*args, **kwargs) # Reverse mode doesn't need to do anything, no", "diffs): left, right = diffs return np.add(left, right) # Reverse mode @node_decorate('reverse') def", "{} for key, var in self._variables.items(): self._derivative[key] = var._grad_value return self # Uncomment", "Compute only if necessary, otherwise we run into log(-c) issues temp_base = np.copy(base)", "%s has been given no value.' % self.name) return self.value() def diff(self): if", "given node. 
\"\"\" return create_computational_table(self) \"\"\" SUBCLASSES Node subclasses that define operations or", "the case the decorator pattern takes arguments (__call__ is called only once at", "= cls.make_constant(value) new_nodes.append(new) node.set_children(*new_nodes) node.update_variables() return node \"\"\" MAGIC Various implementations to improve", "term2 = 0 # if exp_prime != 0: # Compute only if necessary,", "based on mode if settings.current_mode() == \"forward\": for var in self.iterate_seeds(): self.diff() else:", "def __add__(self, value): node = self.make_node(Addition(), self, value) return node def __radd__(self, value):", "= args[0] if input_dict.keys() != self._variables.keys(): raise TypeError('Input not recognized.') # Compute the", "**kwargs): return self.compute(*args, **kwargs) def __repr__(self): output = 'Node(Function = %r, Value =", "in self.children] result = fn(self, values) self.set_value(result) return result return wrapper def diff_wrapper(self,", "self.type = 'Division' @node_decorate('evaluate') def eval(self, values): if values[1] == 0: raise ZeroDivisionError('Division", "1 self.add_grad_contribution(1) self.reverse() # Now set the results self._derivative = {} for key,", "node_decorate decorator to update node values upon computation. \"\"\" def compute(self, *args, **kwargs):", "return id(self) \"\"\" ATTRIBUTES Methods for setting and getting attributes. 
\"\"\" def value(self):", "node = self.make_node(Division(), self, value) return node def __rtruediv__(self, value): node = self.make_node(Division(),", "= self.make_node(Addition(), self, value) return node def __radd__(self, value): node = self.make_node(Addition(), value,", "def wrapper(self): # Check that we've received all the dependencies we need if", "set the derivative def set_value(self, value): self._value = None if isinstance(value, np.ndarray): self.set_derivative(np.zeros(value.size))", "the decorator pattern takes arguments (__call__ is called only once at decoration, since", "= np.multiply(right, grad_value) right_out = np.multiply(left, grad_value) return (left_out, right_out) class Division(Node): def", "NotImplementedError # Uncomment when overriding: # @node_decorate('reverse') def reverse(self, values, grad_value): raise NotImplementedError", "contribution self._cur_grad_count += 1 self._grad_value += value \"\"\" COMPUTATION Actual computation functions, with", "return term1+term2 # Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): base, exp =", "__rtruediv__(self, value): node = self.make_node(Division(), value, self) return node def __pow__(self, value): node", "= 0 self._cur_grad_count = 0 self._grad_count = 0 for child in self.children: child.zero_grad_values()", "name is None or not isinstance(name, str): raise ValueError('Name must be given for", "Subtraction(Node): def __init__(self): super().__init__() self.type = 'Subtraction' @node_decorate('evaluate') def eval(self, values): # values", "is None or not isinstance(name, str): raise ValueError('Name must be given for variable.')", "self.type = 'Subtraction' @node_decorate('evaluate') def eval(self, values): # values vector respects order return", "self._variables.items(): self._derivative[key] = var._grad_value return self # Uncomment when overriding: # @node_decorate('evaluate') def", "now). 
\"\"\" def __init__(self, mode): # Maintain function metadata (doctstrings, etc.) with wraps", "of this node, which assign seed values to variables to compute all partials.", "return self.derivative() # Override dict functionality for variables; I could keep this #", "%r, Derivative = %r)' % (self.type, self.value(), self.derivative()) return output def __add__(self, value):", "I could keep this # consistent, but would increase computation; elegance tradeoff def", "this node, which assign seed values to variables to compute all partials. \"\"\"", "# @node_decorate('reverse') def reverse(self, values, grad_value): raise NotImplementedError def get_comp_graph(self): \"\"\" Creates a", "2)) return (numer_out, denom_out) class Power(Node): def __init__(self): super().__init__() self.type = 'Power' @node_decorate('evaluate')", "COMPUTATION Actual computation functions, with eval and diff to be implemented by subclasses.", "def get_comp_table(self): \"\"\" Creates a computational table for a given node. \"\"\" return", "*values): new_nodes = [] for value in values: new = value if not", "value): var = self.update_cur_var() if isinstance(value, numbers.Number): self._derivative[self._cur_var] = value else: # if", "+= 1 self._grad_value += value \"\"\" COMPUTATION Actual computation functions, with eval and", "__hash__(self): return id(self) \"\"\" ATTRIBUTES Methods for setting and getting attributes. 
\"\"\" def", "diffs): return np.subtract(diffs[0], diffs[1]) # Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): return", "the results self._derivative = {} for key, var in self._variables.items(): self._derivative[key] = var._grad_value", "variable def compute(self, *args, **kwargs): if len(args) == 0: input_dict = kwargs elif", "values = [child.value() for child in self.children] grad_value = self._grad_value results = fn(self,", "[var.name for var in variables] self._variables = dict(zip(variable_names, variables)) def set_variables(self, input_dict): \"\"\"", "do anything, no children @node_decorate('reverse') def reverse(self, values, grad_value): return () class Addition(Node):", "__truediv__(self, value): node = self.make_node(Division(), self, value) return node def __rtruediv__(self, value): node", "\"\"\" Set variables for evaluation. \"\"\" for key, value in input_dict.items(): self._variables[key].set_value(value) #", "Node Base Node implementation. \"\"\" def __init__(self): self._value = None self._derivative = {}", "self.set_value(result) return result return wrapper def diff_wrapper(self, fn): \"\"\" Wrapper for updating node", "[] for value in values: new = value if not isinstance(new, Node): new", "\"\"\" if len(args) == 0: input_dict = kwargs elif len(args) == 1: input_dict", "vector position def iterate_idxs(self): for i in range(self._value.size): self.set_derivative(0) self.var_idx = i self._derivative[i]", "SUBCLASSES Node subclasses that define operations or single values, such as variables and", "@node_decorate('differentiate') def diff(self, values, diffs): base, exp = values b_prime, exp_prime = diffs", "def __hash__(self): return id(self) \"\"\" ATTRIBUTES Methods for setting and getting attributes. 
\"\"\"", "0 for child in self.children: child.zero_grad_values() def set_grad_count(self): \"\"\" Calculate dependency counts \"\"\"", "0: input_dict = kwargs elif len(args) == 1: input_dict = args[0] if input_dict.keys()", "MODE Helper functions for properly doing the reverse mode of automatic differentiation. These", "received all the dependencies we need if not self.ready_to_reverse(): return # We need", "other def __hash__(self): return id(self) \"\"\" ATTRIBUTES Methods for setting and getting attributes.", "a computational graph for a given node. \"\"\" return create_computational_graph(self) def get_comp_table(self): \"\"\"", "def zero_vector_derivative(self, input_dict): \"\"\" Reset vectors of derivatives recursively in children \"\"\" if", "return Constant(value) @classmethod def make_node(cls, node, *values): new_nodes = [] for value in", "implementations to improve the interface of the package, from calling nodes directly to", "its children, and managing these contributions. 
\"\"\" def zero_grad_values(self): \"\"\" Reset all partial", "run into log(-c) issues temp_base = np.copy(base) temp_base[temp_base<=0] = 1 coef = np.multiply(np.log(temp_base),", "for var in self._variables: # Reset derivatives for v in self._variables: self._variables[v].set_derivative(0) if", "return self._variables[v] def iterate_seeds(self): \"\"\" Generator to iterate over all variables of this", "current node by 1 self.add_grad_contribution(1) self.reverse() # Now set the results self._derivative =", "def __init__(self, name=None): super().__init__() if name is None or not isinstance(name, str): raise", "raise TypeError('Input not recognized.') self.set_value(input_dict[self.name]) self.set_derivative(1); return self def __call__(self, *args, **kwargs): return", "values b_prime, exp_prime = diffs # First term coef = np.multiply(exp, b_prime) powered", "reverse(self, values, grad_value): return (grad_value, -grad_value) class Multiplication(Node): def __init__(self): super().__init__() self.type =", "Reset vectors of derivatives recursively in children \"\"\" if type(self) != Variable: for", "as a decorator factory. Note: the class implementation of decorators behaves very differently", "for node in self.children: node.zero_vector_derivative(input_dict) def update_cur_var(self): for v in self._variables: if np.any(self._variables[v].derivative()):", "seed values to variables to compute all partials. \"\"\" for var in self._variables:", "exp = values return np.power(base, exp) @node_decorate('differentiate') def diff(self, values, diffs): base, exp", "powered = np.power(base, np.subtract(exp, 1)) term1 = np.multiply(coef, powered) # Second term term2", "a constant. Always initiated with 0 derivative. \"\"\" def __init__(self, value): super().__init__() self.set_value(value)", "node def __truediv__(self, value): node = self.make_node(Division(), self, value) return node def __rtruediv__(self,", "graph for a given node. 
\"\"\" return create_computational_graph(self) def get_comp_table(self): \"\"\" Creates a", "for evaluation. \"\"\" for key, value in input_dict.items(): self._variables[key].set_value(value) # if isinstance(value, np.ndarray):", "input_dict): \"\"\" Set variables for evaluation. \"\"\" for key, value in input_dict.items(): self._variables[key].set_value(value)", "__init__(self): super().__init__() self.type = 'Negation' @node_decorate('evaluate') def eval(self, values): return -1*np.array(values[0]) @node_decorate('differentiate') def", "Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): base, exp = values base_out =", "given node. \"\"\" def update_variables(self): \"\"\" Update current variable list to reflect all", "@node_decorate('differentiate') def diff(self, values, diffs): num = np.multiply(diffs[0], values[1]) - np.multiply(values[0], diffs[1]) denom", "+= 1 for child in self.children: child.set_grad_count() def ready_to_reverse(self): return (self._cur_grad_count == self._grad_count)", "= 'Multiplication' @node_decorate('evaluate') def eval(self, values): return np.multiply(values[0], values[1]) @node_decorate('differentiate') def diff(self, values,", "new_vars.append(child) else: new_vars.extend(child._variables.values()) variables = list(set(new_vars)) variable_names = [var.name for var in variables]", "any given node. \"\"\" def update_variables(self): \"\"\" Update current variable list to reflect", "# values vector respects order return np.subtract(values[0], values[1]) @node_decorate('differentiate') def diff(self, values, diffs):", "raise NotImplementedError def get_comp_graph(self): \"\"\" Creates a computational graph for a given node.", "'Node(Function = %r, Value = %r, Derivative = %r)' % (self.type, self.value(), self.derivative())", "the final output values. \"\"\" def __init__(self, name=None): super().__init__() if name is None", "and differentiate at the given variable values. 
Inputs methods: -Dictionary of {variable_name: value,", "= {} for key, var in self._variables.items(): self._derivative[key] = var._grad_value return self #", "clarity and to serve as a decorator factory. Note: the class implementation of", "# We need to have done first sweep before reverse, assume values exist", "diffs[1]) # Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): return (grad_value, -grad_value) class", "name=None): super().__init__() if name is None or not isinstance(name, str): raise ValueError('Name must", "= [] for value in values: new = value if not isinstance(new, Node):", "np.divide(grad_value, denom) denom_out = -1*np.divide(np.multiply(grad_value,numer), np.power(denom, 2)) return (numer_out, denom_out) class Power(Node): def", "raise TypeError('Value must be numeric or a numpy array.') self._value = value def", "None self.children = [] # Name of type of node self.type = 'None'", "__init__(self, mode): # Maintain function metadata (doctstrings, etc.) 
with wraps self.factory = {'evaluate':", "self.children: if isinstance(child, Variable): new_vars.append(child) else: new_vars.extend(child._variables.values()) variables = list(set(new_vars)) variable_names = [var.name", "in range(len(results)): self.children[idx].add_grad_contribution(results[idx]) self.children[idx].reverse() return results return wrapper class Node(): \"\"\" Class Node", "\"\"\" Calculate dependency counts \"\"\" self._grad_count += 1 for child in self.children: child.set_grad_count()", "First term coef = np.multiply(exp, b_prime) powered = np.power(base, np.subtract(exp, 1)) term1 =", "self.name) return self.derivative() # Override dict functionality for variables; I could keep this", "def __truediv__(self, value): node = self.make_node(Division(), self, value) return node def __rtruediv__(self, value):", "return np.subtract(values[0], values[1]) @node_decorate('differentiate') def diff(self, values, diffs): return np.subtract(diffs[0], diffs[1]) # Reverse", "self.make_node(Addition(), self, value) return node def __radd__(self, value): node = self.make_node(Addition(), value, self)", "iterate_idxs(self): for i in range(self._value.size): self.set_derivative(0) self.var_idx = i self._derivative[i] = 1 yield", "0 derivative. \"\"\" def __init__(self, value): super().__init__() self.set_value(value) self.set_derivative(0) self.type = 'Constant' def", "\"\"\" def __init__(self, name=None): super().__init__() if name is None or not isinstance(name, str):", "node def __rsub__(self, value): node = self.make_node(Subtraction(), value, self) return node def __mul__(self,", "setting and getting attributes. 
\"\"\" def value(self): return self._value def derivative(self): return self._derivative", "# self._derivative[self._cur_var] = np.zeros(value.size) self._derivative[self._cur_var][var.var_idx] = value[var.var_idx] def set_children(self, *children): self.children = children", "all variables of this node, which assign seed values to variables to compute", "Inputs methods: -Dictionary of {variable_name: value, ...} -Keyword arguments of compute(variable_vame=value, ...) \"\"\"", "node = self.make_node(Addition(), self, value) return node def __radd__(self, value): node = self.make_node(Addition(),", "% (self.type, self.value(), self.derivative()) return output def __add__(self, value): node = self.make_node(Addition(), self,", "Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): return (grad_value, grad_value) class Negation(Node): def", "powered = np.power(base, exp) term2 = np.multiply(coef, powered) return term1+term2 # Reverse mode", "values, diffs): return np.multiply(diffs[0], values[1]) + np.multiply(diffs[1], values[0]) # Reverse mode @node_decorate('reverse') def", "TypeError('Value must be numeric or a numpy array.') self._value = value def set_derivative(self,", "value) return node def __rpow__(self, value): node = self.make_node(Power(), value, self) return node", "values[1] == 0: raise ZeroDivisionError('Division by zero.') return np.divide(values[0], values[1]) @node_decorate('differentiate') def diff(self,", "__call__(self, *args, **kwargs): return self.compute(*args, **kwargs) def __repr__(self): output = 'Node(Function = %r,", "Need to propagate results (functions need to return same # of results as", "actually computing the values and derivatives of any given node. 
\"\"\" def update_variables(self):", "return result return wrapper def reverse_wrapper(self, fn): \"\"\" Wrapper for updating gradients in", "= np.zeros(value.size) self._derivative[self._cur_var][var.var_idx] = value[var.var_idx] def set_children(self, *children): self.children = children \"\"\" VARIABLES", "Node subclasses that define operations or single values, such as variables and constants.", "of evaluation, and then propagates values through the graph to the final output", "def diff(self): if self.derivative() is None: raise NoValueError('Variable %s has been given no", "settings \"\"\" Custom exceptions. \"\"\" class NoValueError(Exception): pass class node_decorate(): \"\"\" Decorator for", "= [child.eval() for child in self.children] result = fn(self, values) self.set_value(result) return result", "of whether or not any node is ready to compute its contributions to", "self.name) return self.value() def diff(self): if self.derivative() is None: raise NoValueError('Variable %s has", "functions. Implemented as a class for clarity and to serve as a decorator", "diff_wrapper(self, fn): \"\"\" Wrapper for updating node derivatives. \"\"\" @wraps(fn) def wrapper(self): values", "node = self.make_node(Subtraction(), value, self) return node def __mul__(self, value): node = self.make_node(Multiplication(),", "children. 
\"\"\" new_vars = [] for child in self.children: if isinstance(child, Variable): new_vars.append(child)", "def reverse(self, values, grad_value): return (grad_value, -grad_value) class Multiplication(Node): def __init__(self): super().__init__() self.type", "len(args) == 1: input_dict = args[0] if input_dict.keys() != self._variables.keys(): raise TypeError('Input not", "self._derivative = value def eval(self): return self.value() def diff(self): return self.derivative() # Reverse", "include keeping track of whether or not any node is ready to compute", "no children @node_decorate('reverse') def reverse(self, values, grad_value): return () class Addition(Node): def __init__(self):", "@node_decorate('reverse') def reverse(self, values, grad_value): return (-1*np.array(grad_value),) class Subtraction(Node): def __init__(self): super().__init__() self.type", "if self._cur_var not in self._derivative: # self._derivative[self._cur_var] = np.zeros(value.size) self._derivative[self._cur_var][var.var_idx] = value[var.var_idx] def", "Differentiation \"\"\" from functools import wraps import numpy as np import numbers from", "def set_grad_count(self): \"\"\" Calculate dependency counts \"\"\" self._grad_count += 1 for child in", "+ np.multiply(diffs[1], values[0]) # Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): left, right", "new_nodes.append(new) node.set_children(*new_nodes) node.update_variables() return node \"\"\" MAGIC Various implementations to improve the interface", "computing the values and derivatives of any given node. \"\"\" def update_variables(self): \"\"\"", "\"\"\" Reset all partial contributions for reverse pass \"\"\" self._grad_value = 0 self._cur_grad_count", "def set_derivative(self, value): var = self.update_cur_var() if isinstance(value, numbers.Number): self._derivative[self._cur_var] = value else:", "symbolic variable. 
Serves as the basis of evaluation, and then propagates values through", "def value(self): return self._value def derivative(self): return self._derivative def set_value(self, value): if not", "vectors of derivatives recursively in children \"\"\" if type(self) != Variable: for key,", "fn(self, values, grad_value) # Need to propagate results (functions need to return same", "otherwise we run into log(-c) issues temp_base = np.copy(base) temp_base[temp_base<=0] = 1 coef", "if not isinstance(new, Node): new = cls.make_constant(value) new_nodes.append(new) node.set_children(*new_nodes) node.update_variables() return node \"\"\"", "the basic stores for actually computing the values and derivatives of any given", "= value def set_derivative(self, value): var = self.update_cur_var() if isinstance(value, numbers.Number): self._derivative[self._cur_var] =", "computation; elegance tradeoff def set_derivative(self, value): if isinstance(self.value(), np.ndarray): self._derivative[:] = value else:", "child in self.children] diffs = [child.diff() for child in self.children] result = fn(self,", "def wrapper(self): values = [child.eval() for child in self.children] result = fn(self, values)", "self self.var_idx = -1 def eval(self): if self.value() is None: raise NoValueError('Variable %s", "gradients in reverse pass. \"\"\" @wraps(fn) def wrapper(self): # Check that we've received", "return node def __sub__(self, value): node = self.make_node(Subtraction(), self, value) return node def", "def __neg__(self): node = self.make_node(Negation(), self) return node def __sub__(self, value): node =", "for idx in self._variables[var].iterate_idxs(): yield idx else: self._variables[var].set_derivative(1) yield var \"\"\" REVERSE MODE", "isinstance(value, numbers.Number): self._derivative[self._cur_var] = value else: # if self._cur_var not in self._derivative: #", "ready to compute its contributions to its children, and managing these contributions. 
\"\"\"", "eval(self, values): # values vector respects order return np.subtract(values[0], values[1]) @node_decorate('differentiate') def diff(self,", "diffs # First term coef = np.multiply(exp, b_prime) powered = np.power(base, np.subtract(exp, 1))", "self def __call__(self, *args, **kwargs): return self.compute(*args, **kwargs) # Reverse mode doesn't need", "Power(Node): def __init__(self): super().__init__() self.type = 'Power' @node_decorate('evaluate') def eval(self, values): base, exp", "contributions to its children, and managing these contributions. \"\"\" def zero_grad_values(self): \"\"\" Reset", "node = self.make_node(Power(), self, value) return node def __rpow__(self, value): node = self.make_node(Power(),", "= 'Subtraction' @node_decorate('evaluate') def eval(self, values): # values vector respects order return np.subtract(values[0],", "self._derivative = {} self._variables = {} self._cur_var = None self.children = [] #", "partials. \"\"\" for var in self._variables: # Reset derivatives for v in self._variables:", "key, var in self._variables.items(): self._derivative[key] = var._grad_value return self # Uncomment when overriding:", "return self._value def derivative(self): return self._derivative def set_value(self, value): if not isinstance(value, (numbers.Number,", "self._grad_count) def add_grad_contribution(self, value): # Keep track of addition contribution self._cur_grad_count += 1", "updating gradients in reverse pass. \"\"\" @wraps(fn) def wrapper(self): # Check that we've", "methods: -Dictionary of {variable_name: value, ...} -Keyword arguments of compute(variable_vame=value, ...) 
\"\"\" if", "'Addition' @node_decorate('evaluate') def eval(self, values): left, right = values return np.add(left, right) @node_decorate('differentiate')", "key, value in input_dict.items(): self._variables[key].set_value(value) # if isinstance(value, np.ndarray): # self._derivative[key] = np.zeros(value.size)", "return self.derivative() # Reverse mode doesn't need to do anything, no children @node_decorate('reverse')", "value \"\"\" COMPUTATION Actual computation functions, with eval and diff to be implemented", "metadata (doctstrings, etc.) with wraps self.factory = {'evaluate': self.eval_wrapper, 'differentiate': self.diff_wrapper, 'reverse': self.reverse_wrapper}", "on mode if settings.current_mode() == \"forward\": for var in self.iterate_seeds(): self.diff() else: #", "= np.multiply(diffs[0], values[1]) - np.multiply(values[0], diffs[1]) denom = np.array(values[1])**2 if denom == 0:", "isinstance(value, (numbers.Number, np.ndarray)): raise TypeError('Value must be numeric or a numpy array.') self._value", "exp = values b_prime, exp_prime = diffs # First term coef = np.multiply(exp,", "return wrapper def diff_wrapper(self, fn): \"\"\" Wrapper for updating node derivatives. \"\"\" @wraps(fn)", "= self.make_node(Addition(), value, self) return node def __neg__(self): node = self.make_node(Negation(), self) return", "in reverse pass. \"\"\" @wraps(fn) def wrapper(self): # Check that we've received all", "computational table for a given node. \"\"\" return create_computational_table(self) \"\"\" SUBCLASSES Node subclasses", "name self.type = 'Variable' self._variables[name] = self self.var_idx = -1 def eval(self): if", "(-1*np.array(grad_value),) class Subtraction(Node): def __init__(self): super().__init__() self.type = 'Subtraction' @node_decorate('evaluate') def eval(self, values):", "and diff to be implemented by subclasses. Use the node_decorate decorator to update", "# Maintain function metadata (doctstrings, etc.) 
with wraps self.factory = {'evaluate': self.eval_wrapper, 'differentiate':", "raise ZeroDivisionError('Division by zero.') return np.divide(num, denom) # Reverse mode @node_decorate('reverse') def reverse(self,", "keeping track of whether or not any node is ready to compute its", "numer_out = np.divide(grad_value, denom) denom_out = -1*np.divide(np.multiply(grad_value,numer), np.power(denom, 2)) return (numer_out, denom_out) class", "self.children[idx].reverse() return results return wrapper class Node(): \"\"\" Class Node Base Node implementation.", "values[0]) # Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): left, right = values", "as variables and constants. \"\"\" class Variable(Node): \"\"\" Node representing a symbolic variable.", "NoValueError(Exception): pass class node_decorate(): \"\"\" Decorator for computation functions. Implemented as a class", "raise TypeError('Input not recognized.') # Compute the value at this node self.set_variables(input_dict) self.eval()", "and constants. \"\"\" class Variable(Node): \"\"\" Node representing a symbolic variable. Serves as", "make_node(cls, node, *values): new_nodes = [] for value in values: new = value", "Creates a computational table for a given node. 
\"\"\" return create_computational_table(self) \"\"\" SUBCLASSES", "if necessary, otherwise we run into log(-c) issues temp_base = np.copy(base) temp_base[temp_base<=0] =", "child in self.children: if isinstance(child, Variable): new_vars.append(child) else: new_vars.extend(child._variables.values()) variables = list(set(new_vars)) variable_names", "return np.divide(num, denom) # Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): numer, denom", "evaluation, and then propagates values through the graph to the final output values.", "make_constant(cls, value): return Constant(value) @classmethod def make_node(cls, node, *values): new_nodes = [] for", "def __mul__(self, value): node = self.make_node(Multiplication(), self, value) return node def __rmul__(self, value):", "grad_value) class Negation(Node): def __init__(self): super().__init__() self.type = 'Negation' @node_decorate('evaluate') def eval(self, values):", "ATTRIBUTES Methods for setting and getting attributes. \"\"\" def value(self): return self._value def", "self.derivative() == other.derivative() def __ne__(self, other): return not self == other def __hash__(self):", "return -1*np.array(values[0]) @node_decorate('differentiate') def diff(self, values, diffs): return -1*np.array(diffs[0]) # Reverse mode @node_decorate('reverse')", "if type(self) != Variable: for key, value in input_dict.items(): if isinstance(value, np.ndarray) and", "np.ndarray): self._derivative[:] = value else: self._derivative = value # On value set, needs", "# if isinstance(value, np.ndarray): # self._derivative[key] = np.zeros(value.size) self.zero_vector_derivative(input_dict) def zero_vector_derivative(self, input_dict): \"\"\"", "value # On value set, needs to set the derivative def set_value(self, value):", "the node_decorate decorator to update node values upon computation. 
\"\"\" def compute(self, *args,", "whether or not any node is ready to compute its contributions to its", "directly to compute to treating them as one would expect in symbolic computation.", "getting attributes. \"\"\" def value(self): return self._value def derivative(self): return self._derivative def set_value(self,", "Node implementation. \"\"\" def __init__(self): self._value = None self._derivative = {} self._variables =", "self.type = 'Multiplication' @node_decorate('evaluate') def eval(self, values): return np.multiply(values[0], values[1]) @node_decorate('differentiate') def diff(self,", "idx else: self._variables[var].set_derivative(1) yield var \"\"\" REVERSE MODE Helper functions for properly doing", "return node def __rsub__(self, value): node = self.make_node(Subtraction(), value, self) return node def", "NotImplementedError def get_comp_graph(self): \"\"\" Creates a computational graph for a given node. \"\"\"", "list(set(new_vars)) variable_names = [var.name for var in variables] self._variables = dict(zip(variable_names, variables)) def", "values): return np.multiply(values[0], values[1]) @node_decorate('differentiate') def diff(self, values, diffs): return np.multiply(diffs[0], values[1]) +", "diffs[1]) denom = np.array(values[1])**2 if denom == 0: raise ZeroDivisionError('Division by zero.') return", "np.array(values[1])**2 if denom == 0: raise ZeroDivisionError('Division by zero.') return np.divide(num, denom) #", "for child in self.children] result = fn(self, values) self.set_value(result) return result return wrapper", "self.set_grad_count() # Seeding output, current node by 1 self.add_grad_contribution(1) self.reverse() # Now set", "self._derivative = value # On value set, needs to set the derivative def", "right_out) class Division(Node): def __init__(self): super().__init__() self.type = 'Division' @node_decorate('evaluate') def eval(self, values):", "called only once at decoration, since we have another function layer outside now).", "to 
the final output values. \"\"\" def __init__(self, name=None): super().__init__() if name is", "np.ndarray): for idx in self._variables[var].iterate_idxs(): yield idx else: self._variables[var].set_derivative(1) yield var \"\"\" REVERSE", "same # of results as children) for idx in range(len(results)): self.children[idx].add_grad_contribution(results[idx]) self.children[idx].reverse() return", "implementation. \"\"\" def __init__(self): self._value = None self._derivative = {} self._variables = {}", "*args, **kwargs): if len(args) == 0: input_dict = kwargs elif len(args) == 1:", "variable. Serves as the basis of evaluation, and then propagates values through the", "set the results self._derivative = {} for key, var in self._variables.items(): self._derivative[key] =", "self.diff() else: # Reverse mode self.zero_grad_values() # Get proper contribution counts self.set_grad_count() #", "or a numpy array.') self._value = value def set_derivative(self, value): var = self.update_cur_var()", "values: new = value if not isinstance(new, Node): new = cls.make_constant(value) new_nodes.append(new) node.set_children(*new_nodes)", "type of node self.type = 'None' # Reverse mode self._grad_value = 0 self._cur_grad_count", "def __call__(self, *args, **kwargs): return self.compute(*args, **kwargs) # Reverse mode doesn't need to", ".settings import settings \"\"\" Custom exceptions. 
\"\"\" class NoValueError(Exception): pass class node_decorate(): \"\"\"", "array.') self._value = value def set_derivative(self, value): var = self.update_cur_var() if isinstance(value, numbers.Number):", "to set the derivative def set_value(self, value): self._value = None if isinstance(value, np.ndarray):", "set_value(self, value): if not isinstance(value, (numbers.Number, np.ndarray)): raise TypeError('Value must be numeric or", "input_dict.items(): if isinstance(value, np.ndarray) and key in self._variables: self._derivative[key] = np.zeros(value.size) for node", "in self._derivative: # self._derivative[self._cur_var] = np.zeros(value.size) self._derivative[self._cur_var][var.var_idx] = value[var.var_idx] def set_children(self, *children): self.children", "children) for idx in range(len(results)): self.children[idx].add_grad_contribution(results[idx]) self.children[idx].reverse() return results return wrapper class Node():", "anything, no children @node_decorate('reverse') def reverse(self, values, grad_value): return () class Constant(Node): \"\"\"", "to reflect all variables necessary in children. \"\"\" new_vars = [] for child", "Reverse mode self.zero_grad_values() # Get proper contribution counts self.set_grad_count() # Seeding output, current", "managing these contributions. \"\"\" def zero_grad_values(self): \"\"\" Reset all partial contributions for reverse", "computation functions, with eval and diff to be implemented by subclasses. 
Use the", "by 1 self.add_grad_contribution(1) self.reverse() # Now set the results self._derivative = {} for", "np.multiply(exp, b_prime) powered = np.power(base, np.subtract(exp, 1)) term1 = np.multiply(coef, powered) # Second", "Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): return (grad_value, -grad_value) class Multiplication(Node): def", "exp_prime) powered = np.power(base, exp) term2 = np.multiply(coef, powered) return term1+term2 # Reverse", "return () class Constant(Node): \"\"\" Node representing a constant. Always initiated with 0", "1: input_dict = args[0] if input_dict.keys() != self._variables.keys(): raise TypeError('Input not recognized.') #", "= -1*np.divide(np.multiply(grad_value,numer), np.power(denom, 2)) return (numer_out, denom_out) class Power(Node): def __init__(self): super().__init__() self.type", "__eq__(self,other): return self.value() == other.value() and self.derivative() == other.derivative() def __ne__(self, other): return", "for key, var in self._variables.items(): self._derivative[key] = var._grad_value return self # Uncomment when", "result = fn(self, values) self.set_value(result) return result return wrapper def diff_wrapper(self, fn): \"\"\"", "def get_comp_graph(self): \"\"\" Creates a computational graph for a given node. \"\"\" return", "by zero.') return np.divide(values[0], values[1]) @node_decorate('differentiate') def diff(self, values, diffs): num = np.multiply(diffs[0],", "isinstance(value, np.ndarray): # self._derivative[key] = np.zeros(value.size) self.zero_vector_derivative(input_dict) def zero_vector_derivative(self, input_dict): \"\"\" Reset vectors", "\"\"\" Creates a computational graph for a given node. \"\"\" return create_computational_graph(self) def", "subclasses. Use the node_decorate decorator to update node values upon computation. 
\"\"\" def", "values, diffs) self.set_derivative(result) return result return wrapper def reverse_wrapper(self, fn): \"\"\" Wrapper for", "def __rsub__(self, value): node = self.make_node(Subtraction(), value, self) return node def __mul__(self, value):", "np.subtract(values[0], values[1]) @node_decorate('differentiate') def diff(self, values, diffs): return np.subtract(diffs[0], diffs[1]) # Reverse mode", "= values numer_out = np.divide(grad_value, denom) denom_out = -1*np.divide(np.multiply(grad_value,numer), np.power(denom, 2)) return (numer_out,", "= 0 for child in self.children: child.zero_grad_values() def set_grad_count(self): \"\"\" Calculate dependency counts", "{} self._variables = {} self._cur_var = None self.children = [] # Name of", "self._derivative[self._cur_var] = np.zeros(value.size) self._derivative[self._cur_var][var.var_idx] = value[var.var_idx] def set_children(self, *children): self.children = children \"\"\"", "# On value set, needs to set the derivative def set_value(self, value): self._value", "children \"\"\" VARIABLES Methods for handling variables, the basic stores for actually computing", "None or not isinstance(name, str): raise ValueError('Name must be given for variable.') self.name", "no children @node_decorate('reverse') def reverse(self, values, grad_value): return () class Constant(Node): \"\"\" Node", "etc.) with wraps self.factory = {'evaluate': self.eval_wrapper, 'differentiate': self.diff_wrapper, 'reverse': self.reverse_wrapper} self.wrapper =", "value, self) return node def __pow__(self, value): node = self.make_node(Power(), self, value) return", "not recognized.') self.set_value(input_dict[self.name]) self.set_derivative(1); return self def __call__(self, *args, **kwargs): return self.compute(*args, **kwargs)", "values, grad_value): return (grad_value, -grad_value) class Multiplication(Node): def __init__(self): super().__init__() self.type = 'Multiplication'", "a given node. 
\"\"\" return create_computational_table(self) \"\"\" SUBCLASSES Node subclasses that define operations", "value else: self._derivative = value # On value set, needs to set the", "self._derivative[key] = np.zeros(value.size) self.zero_vector_derivative(input_dict) def zero_vector_derivative(self, input_dict): \"\"\" Reset vectors of derivatives recursively", "grad_value): raise NotImplementedError def get_comp_graph(self): \"\"\" Creates a computational graph for a given", "yield var \"\"\" REVERSE MODE Helper functions for properly doing the reverse mode", "in range(self._value.size): self.set_derivative(0) self.var_idx = i self._derivative[i] = 1 yield i # #", "value) return node def __rmul__(self, value): node = self.make_node(Multiplication(), value, self) return node", "increase computation; elegance tradeoff def set_derivative(self, value): if isinstance(self.value(), np.ndarray): self._derivative[:] = value", "() class Constant(Node): \"\"\" Node representing a constant. Always initiated with 0 derivative.", "term coef = np.multiply(exp, b_prime) powered = np.power(base, np.subtract(exp, 1)) term1 = np.multiply(coef,", "reverse(self, values, grad_value): return (-1*np.array(grad_value),) class Subtraction(Node): def __init__(self): super().__init__() self.type = 'Subtraction'", "diff(self, values, diffs): return np.multiply(diffs[0], values[1]) + np.multiply(diffs[1], values[0]) # Reverse mode @node_decorate('reverse')", "def reverse(self, values, grad_value): numer, denom = values numer_out = np.divide(grad_value, denom) denom_out", "improve the interface of the package, from calling nodes directly to compute to", "partial contributions for reverse pass \"\"\" self._grad_value = 0 self._cur_grad_count = 0 self._grad_count", "Name of type of node self.type = 'None' # Reverse mode self._grad_value =", "raise NoValueError('Variable %s has been given no value.' 
% self.name) return self.derivative() #", "in self._variables[var].iterate_idxs(): yield idx else: self._variables[var].set_derivative(1) yield var \"\"\" REVERSE MODE Helper functions", "diffs): base, exp = values b_prime, exp_prime = diffs # First term coef", "issues temp_base = np.copy(base) temp_base[temp_base<=0] = 1 coef = np.multiply(np.log(temp_base), exp_prime) powered =", "of decorators behaves very differently in the case the decorator pattern takes arguments", "def eval(self): if self.value() is None: raise NoValueError('Variable %s has been given no", "return self.value() def diff(self): if self.derivative() is None: raise NoValueError('Variable %s has been", "None: raise NoValueError('Variable %s has been given no value.' % self.name) return self.derivative()", "# Compute the value at this node self.set_variables(input_dict) self.eval() # Compute derivatives based", "computation functions. Implemented as a class for clarity and to serve as a", "self.children: node.zero_vector_derivative(input_dict) def update_cur_var(self): for v in self._variables: if np.any(self._variables[v].derivative()): self._cur_var = v", "__init__(self): super().__init__() self.type = 'Addition' @node_decorate('evaluate') def eval(self, values): left, right = values", "def __eq__(self,other): return self.value() == other.value() and self.derivative() == other.derivative() def __ne__(self, other):", "var in self.iterate_seeds(): self.diff() else: # Reverse mode self.zero_grad_values() # Get proper contribution", "that we've received all the dependencies we need if not self.ready_to_reverse(): return #", "result = fn(self, values, diffs) self.set_derivative(result) return result return wrapper def reverse_wrapper(self, fn):", "function layer outside now). 
\"\"\" def __init__(self, mode): # Maintain function metadata (doctstrings,", "value): node = self.make_node(Multiplication(), value, self) return node def __truediv__(self, value): node =", "add_grad_contribution(self, value): # Keep track of addition contribution self._cur_grad_count += 1 self._grad_value +=", "np.multiply(coef, powered) return term1+term2 # Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): base,", "implementation of decorators behaves very differently in the case the decorator pattern takes", "def compute(self, *args, **kwargs): \"\"\" Evaluate and differentiate at the given variable values.", "np.multiply(right, grad_value) right_out = np.multiply(left, grad_value) return (left_out, right_out) class Division(Node): def __init__(self):", "left, right = values return np.add(left, right) @node_decorate('differentiate') def diff(self, values, diffs): left,", "cls.make_constant(value) new_nodes.append(new) node.set_children(*new_nodes) node.update_variables() return node \"\"\" MAGIC Various implementations to improve the", "self._variables[v] def iterate_seeds(self): \"\"\" Generator to iterate over all variables of this node,", "def diff(self, values, diffs): return -1*np.array(diffs[0]) # Reverse mode @node_decorate('reverse') def reverse(self, values,", "node = self.make_node(Division(), value, self) return node def __pow__(self, value): node = self.make_node(Power(),", "return node def __eq__(self,other): return self.value() == other.value() and self.derivative() == other.derivative() def", "np.zeros(value.size) for node in self.children: node.zero_vector_derivative(input_dict) def update_cur_var(self): for v in self._variables: if", "values, diffs): raise NotImplementedError # Uncomment when overriding: # @node_decorate('reverse') def reverse(self, values,", "self.children] grad_value = self._grad_value results = fn(self, values, grad_value) # Need to propagate", "position def iterate_idxs(self): for i in 
range(self._value.size): self.set_derivative(0) self.var_idx = i self._derivative[i] =", "interface of the package, from calling nodes directly to compute to treating them", "right_out = np.multiply(left, grad_value) return (left_out, right_out) class Division(Node): def __init__(self): super().__init__() self.type", "= dict(zip(variable_names, variables)) def set_variables(self, input_dict): \"\"\" Set variables for evaluation. \"\"\" for", "\"\"\" SUBCLASSES Node subclasses that define operations or single values, such as variables", "def reverse(self, values, grad_value): return () class Addition(Node): def __init__(self): super().__init__() self.type =", "wrapper class Node(): \"\"\" Class Node Base Node implementation. \"\"\" def __init__(self): self._value", "Uncomment when overriding: # @node_decorate('differentiate') def diff(self, values, diffs): raise NotImplementedError # Uncomment", "eval(self, values): base, exp = values return np.power(base, exp) @node_decorate('differentiate') def diff(self, values,", "values to variables to compute all partials. \"\"\" for var in self._variables: #", "== other def __hash__(self): return id(self) \"\"\" ATTRIBUTES Methods for setting and getting", "self._variables: if np.any(self._variables[v].derivative()): self._cur_var = v return self._variables[v] def iterate_seeds(self): \"\"\" Generator to", "to iterate over all variables of this node, which assign seed values to", "def eval(self, values): left, right = values return np.add(left, right) @node_decorate('differentiate') def diff(self,", "Base Node implementation. 
\"\"\" def __init__(self): self._value = None self._derivative = {} self._variables", "TypeError('Input not recognized.') # Compute the value at this node self.set_variables(input_dict) self.eval() #", "@node_decorate('reverse') def reverse(self, values, grad_value): left, right = values left_out = np.multiply(right, grad_value)", "# if self._cur_var not in self._derivative: # self._derivative[self._cur_var] = np.zeros(value.size) self._derivative[self._cur_var][var.var_idx] = value[var.var_idx]", "def eval(self, values): base, exp = values return np.power(base, exp) @node_decorate('differentiate') def diff(self,", "reflect all variables necessary in children. \"\"\" new_vars = [] for child in", "# Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): base, exp = values base_out", "node = self.make_node(Multiplication(), value, self) return node def __truediv__(self, value): node = self.make_node(Division(),", "of {variable_name: value, ...} -Keyword arguments of compute(variable_vame=value, ...) \"\"\" if len(args) ==", "term1+term2 # Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): base, exp = values", "been given no value.' % self.name) return self.derivative() # Override dict functionality for", "# Uncomment when overriding: # @node_decorate('reverse') def reverse(self, values, grad_value): raise NotImplementedError def", "super().__init__() self.type = 'Power' @node_decorate('evaluate') def eval(self, values): base, exp = values return", "\"\"\" from functools import wraps import numpy as np import numbers from .visualization", "exp = values base_out = np.multiply(np.multiply(exp, np.power(base, exp-1)), grad_value) exp_out = np.multiply(np.multiply(np.log(base), np.power(base,", "node values. 
\"\"\" @wraps(fn) def wrapper(self): values = [child.eval() for child in self.children]", "super().set_value(value) # Iterate over each vector position def iterate_idxs(self): for i in range(self._value.size):", "in self.children: node.zero_vector_derivative(input_dict) def update_cur_var(self): for v in self._variables: if np.any(self._variables[v].derivative()): self._cur_var =", "value): node = self.make_node(Power(), self, value) return node def __rpow__(self, value): node =", "the variable def compute(self, *args, **kwargs): if len(args) == 0: input_dict = kwargs", "value): self._value = None if isinstance(value, np.ndarray): self.set_derivative(np.zeros(value.size)) super().set_value(value) # Iterate over each", "values, grad_value): base, exp = values base_out = np.multiply(np.multiply(exp, np.power(base, exp-1)), grad_value) exp_out", "None self._derivative = {} self._variables = {} self._cur_var = None self.children = []", "mode @node_decorate('reverse') def reverse(self, values, grad_value): return (grad_value, grad_value) class Negation(Node): def __init__(self):", "decorator pattern takes arguments (__call__ is called only once at decoration, since we", "def __radd__(self, value): node = self.make_node(Addition(), value, self) return node def __neg__(self): node", "(numer_out, denom_out) class Power(Node): def __init__(self): super().__init__() self.type = 'Power' @node_decorate('evaluate') def eval(self,", "result return wrapper def reverse_wrapper(self, fn): \"\"\" Wrapper for updating gradients in reverse", "np.ndarray)): raise TypeError('Value must be numeric or a numpy array.') self._value = value", "else: # if self._cur_var not in self._derivative: # self._derivative[self._cur_var] = np.zeros(value.size) self._derivative[self._cur_var][var.var_idx] =", "handling variables, the basic stores for actually computing the values and derivatives of", "by subclasses. Use the node_decorate decorator to update node values upon computation. 
\"\"\"", "is called only once at decoration, since we have another function layer outside", "id(self) \"\"\" ATTRIBUTES Methods for setting and getting attributes. \"\"\" def value(self): return", "= self._grad_value results = fn(self, values, grad_value) # Need to propagate results (functions", "self._variables[var].set_derivative(1) yield var \"\"\" REVERSE MODE Helper functions for properly doing the reverse", "value.' % self.name) return self.derivative() # Override dict functionality for variables; I could", "self.var_idx = -1 def eval(self): if self.value() is None: raise NoValueError('Variable %s has", "from .visualization import create_computational_graph, create_computational_table from .settings import settings \"\"\" Custom exceptions. \"\"\"", "# Check that we've received all the dependencies we need if not self.ready_to_reverse():", "diff(self): if self.derivative() is None: raise NoValueError('Variable %s has been given no value.'", "value if not isinstance(new, Node): new = cls.make_constant(value) new_nodes.append(new) node.set_children(*new_nodes) node.update_variables() return node", "value): return Constant(value) @classmethod def make_node(cls, node, *values): new_nodes = [] for value", "numbers.Number): self._derivative[self._cur_var] = value else: # if self._cur_var not in self._derivative: # self._derivative[self._cur_var]", "need if not self.ready_to_reverse(): return # We need to have done first sweep", "only if necessary, otherwise we run into log(-c) issues temp_base = np.copy(base) temp_base[temp_base<=0]", "layer outside now). \"\"\" def __init__(self, mode): # Maintain function metadata (doctstrings, etc.)", "np.subtract(diffs[0], diffs[1]) # Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): return (grad_value, -grad_value)", "Wrapper for updating gradients in reverse pass. 
\"\"\" @wraps(fn) def wrapper(self): # Check", "eval(self): if self.value() is None: raise NoValueError('Variable %s has been given no value.'", "= np.multiply(exp, b_prime) powered = np.power(base, np.subtract(exp, 1)) term1 = np.multiply(coef, powered) #", "other): return not self == other def __hash__(self): return id(self) \"\"\" ATTRIBUTES Methods", "return create_computational_graph(self) def get_comp_table(self): \"\"\" Creates a computational table for a given node.", "np.divide(num, denom) # Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): numer, denom =", "values and derivatives of any given node. \"\"\" def update_variables(self): \"\"\" Update current", "eval(self, values): return np.multiply(values[0], values[1]) @node_decorate('differentiate') def diff(self, values, diffs): return np.multiply(diffs[0], values[1])", "update node values upon computation. \"\"\" def compute(self, *args, **kwargs): \"\"\" Evaluate and", "diffs return np.add(left, right) # Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): return", "diff to be implemented by subclasses. Use the node_decorate decorator to update node", "self.zero_vector_derivative(input_dict) def zero_vector_derivative(self, input_dict): \"\"\" Reset vectors of derivatives recursively in children \"\"\"", "functions, with eval and diff to be implemented by subclasses. 
Use the node_decorate", "self.value() == other.value() and self.derivative() == other.derivative() def __ne__(self, other): return not self", "class implementation of decorators behaves very differently in the case the decorator pattern", "__init__(self): self._value = None self._derivative = {} self._variables = {} self._cur_var = None", "right = values return np.add(left, right) @node_decorate('differentiate') def diff(self, values, diffs): left, right", "def __ne__(self, other): return not self == other def __hash__(self): return id(self) \"\"\"", "\"forward\": for var in self.iterate_seeds(): self.diff() else: # Reverse mode self.zero_grad_values() # Get", "if isinstance(value, numbers.Number): self._derivative[self._cur_var] = value else: # if self._cur_var not in self._derivative:", "the class implementation of decorators behaves very differently in the case the decorator", "of derivatives recursively in children \"\"\" if type(self) != Variable: for key, value", "we've received all the dependencies we need if not self.ready_to_reverse(): return # We", "import settings \"\"\" Custom exceptions. \"\"\" class NoValueError(Exception): pass class node_decorate(): \"\"\" Decorator", "Always initiated with 0 derivative. \"\"\" def __init__(self, value): super().__init__() self.set_value(value) self.set_derivative(0) self.type", "@node_decorate('differentiate') def diff(self, values, diffs): left, right = diffs return np.add(left, right) #", "doing the reverse mode of automatic differentiation. These include keeping track of whether", "have another function layer outside now). 
\"\"\" def __init__(self, mode): # Maintain function", "i # # Override calling the variable def compute(self, *args, **kwargs): if len(args)", "Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): numer, denom = values numer_out =", "derivatives based on mode if settings.current_mode() == \"forward\": for var in self.iterate_seeds(): self.diff()", "raise NotImplementedError # Uncomment when overriding: # @node_decorate('reverse') def reverse(self, values, grad_value): raise", "var in self._variables.items(): self._derivative[key] = var._grad_value return self # Uncomment when overriding: #", "raise NotImplementedError # Uncomment when overriding: # @node_decorate('differentiate') def diff(self, values, diffs): raise", "self._variables[name] = self self.var_idx = -1 def eval(self): if self.value() is None: raise", "def diff_wrapper(self, fn): \"\"\" Wrapper for updating node derivatives. \"\"\" @wraps(fn) def wrapper(self):", "return () class Addition(Node): def __init__(self): super().__init__() self.type = 'Addition' @node_decorate('evaluate') def eval(self,", "Reset all partial contributions for reverse pass \"\"\" self._grad_value = 0 self._cur_grad_count =", "result return wrapper def diff_wrapper(self, fn): \"\"\" Wrapper for updating node derivatives. \"\"\"", "\"\"\" self._grad_value = 0 self._cur_grad_count = 0 self._grad_count = 0 for child in", "fn): \"\"\" Wrapper for updating node values. \"\"\" @wraps(fn) def wrapper(self): values =", "values): left, right = values return np.add(left, right) @node_decorate('differentiate') def diff(self, values, diffs):", "reverse_wrapper(self, fn): \"\"\" Wrapper for updating gradients in reverse pass. 
\"\"\" @wraps(fn) def", "self.make_node(Power(), value, self) return node def __eq__(self,other): return self.value() == other.value() and self.derivative()", "0 self._grad_count = 0 for child in self.children: child.zero_grad_values() def set_grad_count(self): \"\"\" Calculate", "diff(self, values, diffs): return np.subtract(diffs[0], diffs[1]) # Reverse mode @node_decorate('reverse') def reverse(self, values,", "= 'Addition' @node_decorate('evaluate') def eval(self, values): left, right = values return np.add(left, right)", "def set_derivative(self, value): self._derivative = value def eval(self): return self.value() def diff(self): return", "for child in self.children: if isinstance(child, Variable): new_vars.append(child) else: new_vars.extend(child._variables.values()) variables = list(set(new_vars))", "for v in self._variables: if np.any(self._variables[v].derivative()): self._cur_var = v return self._variables[v] def iterate_seeds(self):", "def set_variables(self, input_dict): \"\"\" Set variables for evaluation. \"\"\" for key, value in", "self._variables[key].set_value(value) # if isinstance(value, np.ndarray): # self._derivative[key] = np.zeros(value.size) self.zero_vector_derivative(input_dict) def zero_vector_derivative(self, input_dict):", "== other.value() and self.derivative() == other.derivative() def __ne__(self, other): return not self ==", "a symbolic variable. 
Serves as the basis of evaluation, and then propagates values", "grad_value): return (-1*np.array(grad_value),) class Subtraction(Node): def __init__(self): super().__init__() self.type = 'Subtraction' @node_decorate('evaluate') def", "exp) term2 = np.multiply(coef, powered) return term1+term2 # Reverse mode @node_decorate('reverse') def reverse(self,", "== 1: input_dict = args[0] if input_dict.keys() != self._variables.keys(): raise TypeError('Input not recognized.')", "@node_decorate('reverse') def reverse(self, values, grad_value): return (grad_value, grad_value) class Negation(Node): def __init__(self): super().__init__()", "input_dict = args[0] if input_dict.keys() != self._variables.keys(): raise TypeError('Input not recognized.') # Compute", "children, and managing these contributions. \"\"\" def zero_grad_values(self): \"\"\" Reset all partial contributions", "**kwargs) def __repr__(self): output = 'Node(Function = %r, Value = %r, Derivative =", "node is ready to compute its contributions to its children, and managing these", "compute all partials. 
\"\"\" for var in self._variables: # Reset derivatives for v", "Various implementations to improve the interface of the package, from calling nodes directly", "# First term coef = np.multiply(exp, b_prime) powered = np.power(base, np.subtract(exp, 1)) term1", "if not self.ready_to_reverse(): return # We need to have done first sweep before", "value): node = self.make_node(Addition(), value, self) return node def __neg__(self): node = self.make_node(Negation(),", "Uncomment when overriding: # @node_decorate('evaluate') def eval(self, values): raise NotImplementedError # Uncomment when", "of the package, from calling nodes directly to compute to treating them as", "return not self == other def __hash__(self): return id(self) \"\"\" ATTRIBUTES Methods for", "Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): return (-1*np.array(grad_value),) class Subtraction(Node): def __init__(self):", "denom_out) class Power(Node): def __init__(self): super().__init__() self.type = 'Power' @node_decorate('evaluate') def eval(self, values):", "input_dict.items(): self._variables[key].set_value(value) # if isinstance(value, np.ndarray): # self._derivative[key] = np.zeros(value.size) self.zero_vector_derivative(input_dict) def zero_vector_derivative(self,", "node def __neg__(self): node = self.make_node(Negation(), self) return node def __sub__(self, value): node", "create_computational_table(self) \"\"\" SUBCLASSES Node subclasses that define operations or single values, such as", "values return np.power(base, exp) @node_decorate('differentiate') def diff(self, values, diffs): base, exp = values", "would increase computation; elegance tradeoff def set_derivative(self, value): if isinstance(self.value(), np.ndarray): self._derivative[:] =", "which assign seed values to variables to compute all partials. 
\"\"\" for var", "def reverse(self, values, grad_value): base, exp = values base_out = np.multiply(np.multiply(exp, np.power(base, exp-1)),", "Compute the value at this node self.set_variables(input_dict) self.eval() # Compute derivatives based on", "values, diffs): base, exp = values b_prime, exp_prime = diffs # First term", "class Negation(Node): def __init__(self): super().__init__() self.type = 'Negation' @node_decorate('evaluate') def eval(self, values): return", "(left_out, right_out) class Division(Node): def __init__(self): super().__init__() self.type = 'Division' @node_decorate('evaluate') def eval(self,", "= 0 # if exp_prime != 0: # Compute only if necessary, otherwise", "a given node. \"\"\" return create_computational_graph(self) def get_comp_table(self): \"\"\" Creates a computational table", "order return np.subtract(values[0], values[1]) @node_decorate('differentiate') def diff(self, values, diffs): return np.subtract(diffs[0], diffs[1]) #", "ready_to_reverse(self): return (self._cur_grad_count == self._grad_count) def add_grad_contribution(self, value): # Keep track of addition", "self.compute(*args, **kwargs) def __repr__(self): output = 'Node(Function = %r, Value = %r, Derivative", "the given variable values. Inputs methods: -Dictionary of {variable_name: value, ...} -Keyword arguments", "factory. Note: the class implementation of decorators behaves very differently in the case", "values vector respects order return np.subtract(values[0], values[1]) @node_decorate('differentiate') def diff(self, values, diffs): return", "\"\"\" ATTRIBUTES Methods for setting and getting attributes. \"\"\" def value(self): return self._value", "def __rpow__(self, value): node = self.make_node(Power(), value, self) return node def __eq__(self,other): return", "for updating gradients in reverse pass. \"\"\" @wraps(fn) def wrapper(self): # Check that", "eval_wrapper(self, fn): \"\"\" Wrapper for updating node values. 
\"\"\" @wraps(fn) def wrapper(self): values", "__call__(self, *args, **kwargs): return self.compute(*args, **kwargs) # Reverse mode doesn't need to do", "in self._variables: self._derivative[key] = np.zeros(value.size) for node in self.children: node.zero_vector_derivative(input_dict) def update_cur_var(self): for", "# Uncomment when overriding: # @node_decorate('differentiate') def diff(self, values, diffs): raise NotImplementedError #", "values, grad_value) # Need to propagate results (functions need to return same #", "value) return node def __radd__(self, value): node = self.make_node(Addition(), value, self) return node", "np.multiply(coef, powered) # Second term term2 = 0 # if exp_prime != 0:", "self._derivative[key] = var._grad_value return self # Uncomment when overriding: # @node_decorate('evaluate') def eval(self,", "value): if not isinstance(value, (numbers.Number, np.ndarray)): raise TypeError('Value must be numeric or a", "self.factory[mode] def __call__(self, fn): return self.wrapper(fn) def eval_wrapper(self, fn): \"\"\" Wrapper for updating", "def eval(self, values): if values[1] == 0: raise ZeroDivisionError('Division by zero.') return np.divide(values[0],", "for v in self._variables: self._variables[v].set_derivative(0) if isinstance(self._variables[var].value(), np.ndarray): for idx in self._variables[var].iterate_idxs(): yield", "mode @node_decorate('reverse') def reverse(self, values, grad_value): base, exp = values base_out = np.multiply(np.multiply(exp,", "set_derivative(self, value): var = self.update_cur_var() if isinstance(value, numbers.Number): self._derivative[self._cur_var] = value else: #", "self.set_derivative(result) return result return wrapper def reverse_wrapper(self, fn): \"\"\" Wrapper for updating gradients", "value in input_dict.items(): self._variables[key].set_value(value) # if isinstance(value, np.ndarray): # self._derivative[key] = np.zeros(value.size) self.zero_vector_derivative(input_dict)", "anything, no children 
@node_decorate('reverse') def reverse(self, values, grad_value): return () class Addition(Node): def", "self.set_variables(input_dict) self.eval() # Compute derivatives based on mode if settings.current_mode() == \"forward\": for", "from functools import wraps import numpy as np import numbers from .visualization import", "yield i # # Override calling the variable def compute(self, *args, **kwargs): if", "isinstance(child, Variable): new_vars.append(child) else: new_vars.extend(child._variables.values()) variables = list(set(new_vars)) variable_names = [var.name for var", "-1*np.divide(np.multiply(grad_value,numer), np.power(denom, 2)) return (numer_out, denom_out) class Power(Node): def __init__(self): super().__init__() self.type =", "into log(-c) issues temp_base = np.copy(base) temp_base[temp_base<=0] = 1 coef = np.multiply(np.log(temp_base), exp_prime)", "else: self._derivative = value # On value set, needs to set the derivative", "basis of evaluation, and then propagates values through the graph to the final", "values[1]) + np.multiply(diffs[1], values[0]) # Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): left,", "raise NoValueError('Variable %s has been given no value.' % self.name) return self.value() def", "self._variables[v].set_derivative(0) if isinstance(self._variables[var].value(), np.ndarray): for idx in self._variables[var].iterate_idxs(): yield idx else: self._variables[var].set_derivative(1) yield", "self._variables.keys(): raise TypeError('Input not recognized.') # Compute the value at this node self.set_variables(input_dict)", "contributions. \"\"\" def zero_grad_values(self): \"\"\" Reset all partial contributions for reverse pass \"\"\"", "for a given node. 
\"\"\" return create_computational_graph(self) def get_comp_table(self): \"\"\" Creates a computational", "return np.subtract(diffs[0], diffs[1]) # Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): return (grad_value,", "decorator to update node values upon computation. \"\"\" def compute(self, *args, **kwargs): \"\"\"", "= np.power(base, np.subtract(exp, 1)) term1 = np.multiply(coef, powered) # Second term term2 =", "self.var_idx = i self._derivative[i] = 1 yield i # # Override calling the", "__repr__(self): output = 'Node(Function = %r, Value = %r, Derivative = %r)' %", "def __rmul__(self, value): node = self.make_node(Multiplication(), value, self) return node def __truediv__(self, value):", "def iterate_idxs(self): for i in range(self._value.size): self.set_derivative(0) self.var_idx = i self._derivative[i] = 1", "updating node values. \"\"\" @wraps(fn) def wrapper(self): values = [child.eval() for child in", "self.add_grad_contribution(1) self.reverse() # Now set the results self._derivative = {} for key, var", "zero_vector_derivative(self, input_dict): \"\"\" Reset vectors of derivatives recursively in children \"\"\" if type(self)", "-1*np.array(diffs[0]) # Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): return (-1*np.array(grad_value),) class Subtraction(Node):", "(functions need to return same # of results as children) for idx in", "diff(self, values, diffs): left, right = diffs return np.add(left, right) # Reverse mode", "= values base_out = np.multiply(np.multiply(exp, np.power(base, exp-1)), grad_value) exp_out = np.multiply(np.multiply(np.log(base), np.power(base, exp)),", "for i in range(self._value.size): self.set_derivative(0) self.var_idx = i self._derivative[i] = 1 yield i", "Serves as the basis of evaluation, and then propagates values through the graph", "return node \"\"\" MAGIC Various implementations to improve the interface of the package,", "addition contribution self._cur_grad_count 
+= 1 self._grad_value += value \"\"\" COMPUTATION Actual computation functions,", "constants. \"\"\" class Variable(Node): \"\"\" Node representing a symbolic variable. Serves as the", "set_grad_count(self): \"\"\" Calculate dependency counts \"\"\" self._grad_count += 1 for child in self.children:", "def reverse(self, values, grad_value): return (-1*np.array(grad_value),) class Subtraction(Node): def __init__(self): super().__init__() self.type =", "self.update_cur_var() if isinstance(value, numbers.Number): self._derivative[self._cur_var] = value else: # if self._cur_var not in", "to do anything, no children @node_decorate('reverse') def reverse(self, values, grad_value): return () class", "= [child.eval() for child in self.children] diffs = [child.diff() for child in self.children]", "return np.add(left, right) @node_decorate('differentiate') def diff(self, values, diffs): left, right = diffs return", "keep this # consistent, but would increase computation; elegance tradeoff def set_derivative(self, value):", "return (numer_out, denom_out) class Power(Node): def __init__(self): super().__init__() self.type = 'Power' @node_decorate('evaluate') def", "@node_decorate('differentiate') def diff(self, values, diffs): return np.subtract(diffs[0], diffs[1]) # Reverse mode @node_decorate('reverse') def", "that define operations or single values, such as variables and constants. \"\"\" class", "variable list to reflect all variables necessary in children. \"\"\" new_vars = []", "str): raise ValueError('Name must be given for variable.') self.name = name self.type =", "= diffs return np.add(left, right) # Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value):", "def __init__(self): super().__init__() self.type = 'Addition' @node_decorate('evaluate') def eval(self, values): left, right =", "values through the graph to the final output values. 
\"\"\" def __init__(self, name=None):", "key in self._variables: self._derivative[key] = np.zeros(value.size) for node in self.children: node.zero_vector_derivative(input_dict) def update_cur_var(self):", "recognized.') self.set_value(input_dict[self.name]) self.set_derivative(1); return self def __call__(self, *args, **kwargs): return self.compute(*args, **kwargs) #", "# if exp_prime != 0: # Compute only if necessary, otherwise we run", "calling nodes directly to compute to treating them as one would expect in", "this node self.set_variables(input_dict) self.eval() # Compute derivatives based on mode if settings.current_mode() ==", "__rpow__(self, value): node = self.make_node(Power(), value, self) return node def __eq__(self,other): return self.value()", "the values and derivatives of any given node. \"\"\" def update_variables(self): \"\"\" Update", "\"\"\" def value(self): return self._value def derivative(self): return self._derivative def set_value(self, value): if", "Wrapper for updating node derivatives. \"\"\" @wraps(fn) def wrapper(self): values = [child.eval() for", "@node_decorate('reverse') def reverse(self, values, grad_value): return (grad_value, -grad_value) class Multiplication(Node): def __init__(self): super().__init__()", "Get proper contribution counts self.set_grad_count() # Seeding output, current node by 1 self.add_grad_contribution(1)", "value set, needs to set the derivative def set_value(self, value): self._value = None", "self.type = 'Constant' def set_derivative(self, value): self._derivative = value def eval(self): return self.value()", "node = self.make_node(Power(), value, self) return node def __eq__(self,other): return self.value() == other.value()", "derivative. 
\"\"\" def __init__(self, value): super().__init__() self.set_value(value) self.set_derivative(0) self.type = 'Constant' def set_derivative(self,", "nodes directly to compute to treating them as one would expect in symbolic", "zero.') return np.divide(values[0], values[1]) @node_decorate('differentiate') def diff(self, values, diffs): num = np.multiply(diffs[0], values[1])", "to be implemented by subclasses. Use the node_decorate decorator to update node values", "eval(self, values): if values[1] == 0: raise ZeroDivisionError('Division by zero.') return np.divide(values[0], values[1])", "# Reverse mode self.zero_grad_values() # Get proper contribution counts self.set_grad_count() # Seeding output,", "we have another function layer outside now). \"\"\" def __init__(self, mode): # Maintain", "value) return node def __rtruediv__(self, value): node = self.make_node(Division(), value, self) return node", "class Multiplication(Node): def __init__(self): super().__init__() self.type = 'Multiplication' @node_decorate('evaluate') def eval(self, values): return", "values, grad_value): numer, denom = values numer_out = np.divide(grad_value, denom) denom_out = -1*np.divide(np.multiply(grad_value,numer),", "= fn(self, values, diffs) self.set_derivative(result) return result return wrapper def reverse_wrapper(self, fn): \"\"\"", "output, current node by 1 self.add_grad_contribution(1) self.reverse() # Now set the results self._derivative", "not self.ready_to_reverse(): return # We need to have done first sweep before reverse,", "__init__(self): super().__init__() self.type = 'Subtraction' @node_decorate('evaluate') def eval(self, values): # values vector respects", "np.multiply(diffs[0], values[1]) - np.multiply(values[0], diffs[1]) denom = np.array(values[1])**2 if denom == 0: raise", "np.ndarray): # self._derivative[key] = np.zeros(value.size) self.zero_vector_derivative(input_dict) def zero_vector_derivative(self, input_dict): \"\"\" Reset vectors of", "**kwargs) # Reverse 
mode doesn't need to do anything, no children @node_decorate('reverse') def", "at this node self.set_variables(input_dict) self.eval() # Compute derivatives based on mode if settings.current_mode()", "np.ndarray) and key in self._variables: self._derivative[key] = np.zeros(value.size) for node in self.children: node.zero_vector_derivative(input_dict)", "single values, such as variables and constants. \"\"\" class Variable(Node): \"\"\" Node representing", "child in self.children: child.set_grad_count() def ready_to_reverse(self): return (self._cur_grad_count == self._grad_count) def add_grad_contribution(self, value):", "children @node_decorate('reverse') def reverse(self, values, grad_value): return () class Addition(Node): def __init__(self): super().__init__()", "np.add(left, right) # Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): return (grad_value, grad_value)", "values left_out = np.multiply(right, grad_value) right_out = np.multiply(left, grad_value) return (left_out, right_out) class", "values[1]) @node_decorate('differentiate') def diff(self, values, diffs): return np.multiply(diffs[0], values[1]) + np.multiply(diffs[1], values[0]) #", "as children) for idx in range(len(results)): self.children[idx].add_grad_contribution(results[idx]) self.children[idx].reverse() return results return wrapper class", "\"\"\" def compute(self, *args, **kwargs): \"\"\" Evaluate and differentiate at the given variable", "= %r)' % (self.type, self.value(), self.derivative()) return output def __add__(self, value): node =", "0: # Compute only if necessary, otherwise we run into log(-c) issues temp_base", "node.set_children(*new_nodes) node.update_variables() return node \"\"\" MAGIC Various implementations to improve the interface of", "mode if settings.current_mode() == \"forward\": for var in self.iterate_seeds(): self.diff() else: # Reverse", "diffs): raise NotImplementedError # Uncomment when overriding: # @node_decorate('reverse') def 
reverse(self, values, grad_value):", "operations or single values, such as variables and constants. \"\"\" class Variable(Node): \"\"\"", "Custom exceptions. \"\"\" class NoValueError(Exception): pass class node_decorate(): \"\"\" Decorator for computation functions.", "def make_constant(cls, value): return Constant(value) @classmethod def make_node(cls, node, *values): new_nodes = []", "dict functionality for variables; I could keep this # consistent, but would increase", "Wrapper for updating node values. \"\"\" @wraps(fn) def wrapper(self): values = [child.eval() for", "self.reverse_wrapper} self.wrapper = self.factory[mode] def __call__(self, fn): return self.wrapper(fn) def eval_wrapper(self, fn): \"\"\"", "a class for clarity and to serve as a decorator factory. Note: the", "else: new_vars.extend(child._variables.values()) variables = list(set(new_vars)) variable_names = [var.name for var in variables] self._variables", "def compute(self, *args, **kwargs): if len(args) == 0: input_dict = kwargs elif len(args)", "not in input_dict: raise TypeError('Input not recognized.') self.set_value(input_dict[self.name]) self.set_derivative(1); return self def __call__(self,", "denom = np.array(values[1])**2 if denom == 0: raise ZeroDivisionError('Division by zero.') return np.divide(num,", "These include keeping track of whether or not any node is ready to", "compute(self, *args, **kwargs): if len(args) == 0: input_dict = kwargs elif len(args) ==", "self._variables[var].iterate_idxs(): yield idx else: self._variables[var].set_derivative(1) yield var \"\"\" REVERSE MODE Helper functions for", "...} -Keyword arguments of compute(variable_vame=value, ...) \"\"\" if len(args) == 0: input_dict =", "decoration, since we have another function layer outside now). 
\"\"\" def __init__(self, mode):", "(__call__ is called only once at decoration, since we have another function layer", "values[1]) @node_decorate('differentiate') def diff(self, values, diffs): return np.subtract(diffs[0], diffs[1]) # Reverse mode @node_decorate('reverse')", "__rsub__(self, value): node = self.make_node(Subtraction(), value, self) return node def __mul__(self, value): node", "children @node_decorate('reverse') def reverse(self, values, grad_value): return () class Constant(Node): \"\"\" Node representing", "values, grad_value): return () class Constant(Node): \"\"\" Node representing a constant. Always initiated", "or single values, such as variables and constants. \"\"\" class Variable(Node): \"\"\" Node", "\"\"\" VARIABLES Methods for handling variables, the basic stores for actually computing the", "self._grad_value += value \"\"\" COMPUTATION Actual computation functions, with eval and diff to", "def reverse(self, values, grad_value): left, right = values left_out = np.multiply(right, grad_value) right_out", "self, value) return node def __rpow__(self, value): node = self.make_node(Power(), value, self) return", "self._grad_count = 0 for child in self.children: child.zero_grad_values() def set_grad_count(self): \"\"\" Calculate dependency", "(self._cur_grad_count == self._grad_count) def add_grad_contribution(self, value): # Keep track of addition contribution self._cur_grad_count", "- np.multiply(values[0], diffs[1]) denom = np.array(values[1])**2 if denom == 0: raise ZeroDivisionError('Division by", "mode @node_decorate('reverse') def reverse(self, values, grad_value): numer, denom = values numer_out = np.divide(grad_value,", "values = [child.eval() for child in self.children] diffs = [child.diff() for child in", "variables, the basic stores for actually computing the values and derivatives of any", "a computational table for a given node. 
\"\"\" return create_computational_table(self) \"\"\" SUBCLASSES Node", "numbers from .visualization import create_computational_graph, create_computational_table from .settings import settings \"\"\" Custom exceptions.", "= self.make_node(Subtraction(), self, value) return node def __rsub__(self, value): node = self.make_node(Subtraction(), value,", "over each vector position def iterate_idxs(self): for i in range(self._value.size): self.set_derivative(0) self.var_idx =", "self.type = 'Addition' @node_decorate('evaluate') def eval(self, values): left, right = values return np.add(left,", "first sweep before reverse, assume values exist values = [child.value() for child in", "= {'evaluate': self.eval_wrapper, 'differentiate': self.diff_wrapper, 'reverse': self.reverse_wrapper} self.wrapper = self.factory[mode] def __call__(self, fn):", "# Reset derivatives for v in self._variables: self._variables[v].set_derivative(0) if isinstance(self._variables[var].value(), np.ndarray): for idx", "= self self.var_idx = -1 def eval(self): if self.value() is None: raise NoValueError('Variable", "= np.multiply(np.multiply(exp, np.power(base, exp-1)), grad_value) exp_out = np.multiply(np.multiply(np.log(base), np.power(base, exp)), grad_value) return (base_out,", "= np.multiply(left, grad_value) return (left_out, right_out) class Division(Node): def __init__(self): super().__init__() self.type =", "self._cur_grad_count = 0 self._grad_count = 0 @classmethod def make_constant(cls, value): return Constant(value) @classmethod", "def ready_to_reverse(self): return (self._cur_grad_count == self._grad_count) def add_grad_contribution(self, value): # Keep track of", "right) # Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): return (grad_value, grad_value) class", "when overriding: # @node_decorate('reverse') def reverse(self, values, grad_value): raise NotImplementedError def get_comp_graph(self): \"\"\"", "__init__(self): super().__init__() self.type = 
'Division' @node_decorate('evaluate') def eval(self, values): if values[1] == 0:", "\"\"\" def __call__(self, *args, **kwargs): return self.compute(*args, **kwargs) def __repr__(self): output = 'Node(Function", "settings.current_mode() == \"forward\": for var in self.iterate_seeds(): self.diff() else: # Reverse mode self.zero_grad_values()", "expect in symbolic computation. \"\"\" def __call__(self, *args, **kwargs): return self.compute(*args, **kwargs) def", "children \"\"\" if type(self) != Variable: for key, value in input_dict.items(): if isinstance(value,", "right = values left_out = np.multiply(right, grad_value) right_out = np.multiply(left, grad_value) return (left_out,", "__init__(self, name=None): super().__init__() if name is None or not isinstance(name, str): raise ValueError('Name", "reverse, assume values exist values = [child.value() for child in self.children] grad_value =", "= np.array(values[1])**2 if denom == 0: raise ZeroDivisionError('Division by zero.') return np.divide(num, denom)", "to compute to treating them as one would expect in symbolic computation. \"\"\"", "value, self) return node def __mul__(self, value): node = self.make_node(Multiplication(), self, value) return", "# Keep track of addition contribution self._cur_grad_count += 1 self._grad_value += value \"\"\"", "return wrapper class Node(): \"\"\" Class Node Base Node implementation. \"\"\" def __init__(self):", "if isinstance(value, np.ndarray): self.set_derivative(np.zeros(value.size)) super().set_value(value) # Iterate over each vector position def iterate_idxs(self):", "grad_value = self._grad_value results = fn(self, values, grad_value) # Need to propagate results", "self.iterate_seeds(): self.diff() else: # Reverse mode self.zero_grad_values() # Get proper contribution counts self.set_grad_count()", "pass class node_decorate(): \"\"\" Decorator for computation functions. 
Implemented as a class for", "args[0] if input_dict.keys() != self._variables.keys(): raise TypeError('Input not recognized.') # Compute the value", "grad_value): left, right = values left_out = np.multiply(right, grad_value) right_out = np.multiply(left, grad_value)", "new = value if not isinstance(new, Node): new = cls.make_constant(value) new_nodes.append(new) node.set_children(*new_nodes) node.update_variables()", "= 0 @classmethod def make_constant(cls, value): return Constant(value) @classmethod def make_node(cls, node, *values):", "reverse pass \"\"\" self._grad_value = 0 self._cur_grad_count = 0 self._grad_count = 0 for", "for child in self.children: child.set_grad_count() def ready_to_reverse(self): return (self._cur_grad_count == self._grad_count) def add_grad_contribution(self,", "for var in self.iterate_seeds(): self.diff() else: # Reverse mode self.zero_grad_values() # Get proper", "@node_decorate('evaluate') def eval(self, values): base, exp = values return np.power(base, exp) @node_decorate('differentiate') def", "# Name of type of node self.type = 'None' # Reverse mode self._grad_value", "idx in range(len(results)): self.children[idx].add_grad_contribution(results[idx]) self.children[idx].reverse() return results return wrapper class Node(): \"\"\" Class", "else: # Reverse mode self.zero_grad_values() # Get proper contribution counts self.set_grad_count() # Seeding", "values) self.set_value(result) return result return wrapper def diff_wrapper(self, fn): \"\"\" Wrapper for updating", "values, such as variables and constants. 
\"\"\" class Variable(Node): \"\"\" Node representing a", "exp) @node_decorate('differentiate') def diff(self, values, diffs): base, exp = values b_prime, exp_prime =", "return np.multiply(diffs[0], values[1]) + np.multiply(diffs[1], values[0]) # Reverse mode @node_decorate('reverse') def reverse(self, values,", "powered) return term1+term2 # Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): base, exp", "if isinstance(self._variables[var].value(), np.ndarray): for idx in self._variables[var].iterate_idxs(): yield idx else: self._variables[var].set_derivative(1) yield var", "value): node = self.make_node(Multiplication(), self, value) return node def __rmul__(self, value): node =", "another function layer outside now). \"\"\" def __init__(self, mode): # Maintain function metadata", "__neg__(self): node = self.make_node(Negation(), self) return node def __sub__(self, value): node = self.make_node(Subtraction(),", "self._cur_var = v return self._variables[v] def iterate_seeds(self): \"\"\" Generator to iterate over all", "== other.derivative() def __ne__(self, other): return not self == other def __hash__(self): return", "= self.update_cur_var() if isinstance(value, numbers.Number): self._derivative[self._cur_var] = value else: # if self._cur_var not", "node, *values): new_nodes = [] for value in values: new = value if", "in children. \"\"\" new_vars = [] for child in self.children: if isinstance(child, Variable):", "type(self) != Variable: for key, value in input_dict.items(): if isinstance(value, np.ndarray) and key", "differentiate at the given variable values. 
Inputs methods: -Dictionary of {variable_name: value, ...}", "'Division' @node_decorate('evaluate') def eval(self, values): if values[1] == 0: raise ZeroDivisionError('Division by zero.')", "def reverse(self, values, grad_value): return () class Constant(Node): \"\"\" Node representing a constant.", "class Subtraction(Node): def __init__(self): super().__init__() self.type = 'Subtraction' @node_decorate('evaluate') def eval(self, values): #", "range(len(results)): self.children[idx].add_grad_contribution(results[idx]) self.children[idx].reverse() return results return wrapper class Node(): \"\"\" Class Node Base", "right = diffs return np.add(left, right) # Reverse mode @node_decorate('reverse') def reverse(self, values,", "= np.power(base, exp) term2 = np.multiply(coef, powered) return term1+term2 # Reverse mode @node_decorate('reverse')", "= [var.name for var in variables] self._variables = dict(zip(variable_names, variables)) def set_variables(self, input_dict):", "functions for properly doing the reverse mode of automatic differentiation. These include keeping", "could keep this # consistent, but would increase computation; elegance tradeoff def set_derivative(self,", "values numer_out = np.divide(grad_value, denom) denom_out = -1*np.divide(np.multiply(grad_value,numer), np.power(denom, 2)) return (numer_out, denom_out)", "self._derivative = {} for key, var in self._variables.items(): self._derivative[key] = var._grad_value return self", "grad_value): return () class Constant(Node): \"\"\" Node representing a constant. 
Always initiated with", "value else: # if self._cur_var not in self._derivative: # self._derivative[self._cur_var] = np.zeros(value.size) self._derivative[self._cur_var][var.var_idx]", "def __init__(self): super().__init__() self.type = 'Subtraction' @node_decorate('evaluate') def eval(self, values): # values vector", "values, diffs): return -1*np.array(diffs[0]) # Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): return", "the interface of the package, from calling nodes directly to compute to treating", "values): return -1*np.array(values[0]) @node_decorate('differentiate') def diff(self, values, diffs): return -1*np.array(diffs[0]) # Reverse mode", "mode @node_decorate('reverse') def reverse(self, values, grad_value): return (-1*np.array(grad_value),) class Subtraction(Node): def __init__(self): super().__init__()", "np.power(base, exp) @node_decorate('differentiate') def diff(self, values, diffs): base, exp = values b_prime, exp_prime", "self.children = children \"\"\" VARIABLES Methods for handling variables, the basic stores for", "self._derivative[i] = 1 yield i # # Override calling the variable def compute(self,", "node. 
\"\"\" def update_variables(self): \"\"\" Update current variable list to reflect all variables", "wraps self.factory = {'evaluate': self.eval_wrapper, 'differentiate': self.diff_wrapper, 'reverse': self.reverse_wrapper} self.wrapper = self.factory[mode] def", "self, value) return node def __radd__(self, value): node = self.make_node(Addition(), value, self) return", "when overriding: # @node_decorate('differentiate') def diff(self, values, diffs): raise NotImplementedError # Uncomment when", "in self._variables.items(): self._derivative[key] = var._grad_value return self # Uncomment when overriding: # @node_decorate('evaluate')", "denom == 0: raise ZeroDivisionError('Division by zero.') return np.divide(num, denom) # Reverse mode", "differently in the case the decorator pattern takes arguments (__call__ is called only", "get_comp_graph(self): \"\"\" Creates a computational graph for a given node. \"\"\" return create_computational_graph(self)", "= 'Node(Function = %r, Value = %r, Derivative = %r)' % (self.type, self.value(),", "output values. 
\"\"\" def __init__(self, name=None): super().__init__() if name is None or not", "super().__init__() self.type = 'Multiplication' @node_decorate('evaluate') def eval(self, values): return np.multiply(values[0], values[1]) @node_decorate('differentiate') def", "node def __radd__(self, value): node = self.make_node(Addition(), value, self) return node def __neg__(self):", "# Compute only if necessary, otherwise we run into log(-c) issues temp_base =", "mode self._grad_value = 0 self._cur_grad_count = 0 self._grad_count = 0 @classmethod def make_constant(cls,", "'Subtraction' @node_decorate('evaluate') def eval(self, values): # values vector respects order return np.subtract(values[0], values[1])", "return (-1*np.array(grad_value),) class Subtraction(Node): def __init__(self): super().__init__() self.type = 'Subtraction' @node_decorate('evaluate') def eval(self,", "# Need to propagate results (functions need to return same # of results", "super().__init__() if name is None or not isinstance(name, str): raise ValueError('Name must be", "def diff(self): return self.derivative() # Reverse mode doesn't need to do anything, no", "if values[1] == 0: raise ZeroDivisionError('Division by zero.') return np.divide(values[0], values[1]) @node_decorate('differentiate') def", "Override dict functionality for variables; I could keep this # consistent, but would", "as one would expect in symbolic computation. \"\"\" def __call__(self, *args, **kwargs): return", "treating them as one would expect in symbolic computation. 
\"\"\" def __call__(self, *args,", "all partial contributions for reverse pass \"\"\" self._grad_value = 0 self._cur_grad_count = 0", "= values return np.power(base, exp) @node_decorate('differentiate') def diff(self, values, diffs): base, exp =", "new_vars = [] for child in self.children: if isinstance(child, Variable): new_vars.append(child) else: new_vars.extend(child._variables.values())", "of node self.type = 'None' # Reverse mode self._grad_value = 0 self._cur_grad_count =", "to treating them as one would expect in symbolic computation. \"\"\" def __call__(self,", "@node_decorate('reverse') def reverse(self, values, grad_value): base, exp = values base_out = np.multiply(np.multiply(exp, np.power(base,", "Multiplication(Node): def __init__(self): super().__init__() self.type = 'Multiplication' @node_decorate('evaluate') def eval(self, values): return np.multiply(values[0],", "grad_value): return (grad_value, grad_value) class Negation(Node): def __init__(self): super().__init__() self.type = 'Negation' @node_decorate('evaluate')", "v return self._variables[v] def iterate_seeds(self): \"\"\" Generator to iterate over all variables of", "self._grad_value = 0 self._cur_grad_count = 0 self._grad_count = 0 @classmethod def make_constant(cls, value):", "== self._grad_count) def add_grad_contribution(self, value): # Keep track of addition contribution self._cur_grad_count +=", "for var in variables] self._variables = dict(zip(variable_names, variables)) def set_variables(self, input_dict): \"\"\" Set", "self.eval() # Compute derivatives based on mode if settings.current_mode() == \"forward\": for var", "return (grad_value, grad_value) class Negation(Node): def __init__(self): super().__init__() self.type = 'Negation' @node_decorate('evaluate') def", "Keep track of addition contribution self._cur_grad_count += 1 self._grad_value += value \"\"\" COMPUTATION", "= self.make_node(Subtraction(), value, self) return node def __mul__(self, value): node = 
self.make_node(Multiplication(), self,", "\"\"\" for var in self._variables: # Reset derivatives for v in self._variables: self._variables[v].set_derivative(0)", "such as variables and constants. \"\"\" class Variable(Node): \"\"\" Node representing a symbolic", "all the dependencies we need if not self.ready_to_reverse(): return # We need to", "value): node = self.make_node(Power(), value, self) return node def __eq__(self,other): return self.value() ==", "= value[var.var_idx] def set_children(self, *children): self.children = children \"\"\" VARIABLES Methods for handling", "functionality for variables; I could keep this # consistent, but would increase computation;", "has been given no value.' % self.name) return self.value() def diff(self): if self.derivative()", "# Now set the results self._derivative = {} for key, var in self._variables.items():", "def reverse(self, values, grad_value): raise NotImplementedError def get_comp_graph(self): \"\"\" Creates a computational graph", "self.set_derivative(np.zeros(value.size)) super().set_value(value) # Iterate over each vector position def iterate_idxs(self): for i in", "node def __rmul__(self, value): node = self.make_node(Multiplication(), value, self) return node def __truediv__(self,", "args[0] if self.name not in input_dict: raise TypeError('Input not recognized.') self.set_value(input_dict[self.name]) self.set_derivative(1); return", "return node def __mul__(self, value): node = self.make_node(Multiplication(), self, value) return node def", "mode self.zero_grad_values() # Get proper contribution counts self.set_grad_count() # Seeding output, current node", "NotImplementedError # Uncomment when overriding: # @node_decorate('differentiate') def diff(self, values, diffs): raise NotImplementedError", "1 self._grad_value += value \"\"\" COMPUTATION Actual computation functions, with eval and diff", "@node_decorate('differentiate') def diff(self, values, diffs): raise NotImplementedError # Uncomment when overriding: 
# @node_decorate('reverse')", "Node representing a constant. Always initiated with 0 derivative. \"\"\" def __init__(self, value):", "necessary, otherwise we run into log(-c) issues temp_base = np.copy(base) temp_base[temp_base<=0] = 1", "self._cur_grad_count = 0 self._grad_count = 0 for child in self.children: child.zero_grad_values() def set_grad_count(self):", "stores for actually computing the values and derivatives of any given node. \"\"\"", "def make_node(cls, node, *values): new_nodes = [] for value in values: new =", "for setting and getting attributes. \"\"\" def value(self): return self._value def derivative(self): return", "self) return node def __pow__(self, value): node = self.make_node(Power(), self, value) return node", "\"\"\" Update current variable list to reflect all variables necessary in children. \"\"\"", "isinstance(value, np.ndarray): self.set_derivative(np.zeros(value.size)) super().set_value(value) # Iterate over each vector position def iterate_idxs(self): for", "of results as children) for idx in range(len(results)): self.children[idx].add_grad_contribution(results[idx]) self.children[idx].reverse() return results return", "for computation functions. Implemented as a class for clarity and to serve as", "= np.multiply(np.log(temp_base), exp_prime) powered = np.power(base, exp) term2 = np.multiply(coef, powered) return term1+term2", "\"\"\" class NoValueError(Exception): pass class node_decorate(): \"\"\" Decorator for computation functions. Implemented as", "def update_cur_var(self): for v in self._variables: if np.any(self._variables[v].derivative()): self._cur_var = v return self._variables[v]", "with wraps self.factory = {'evaluate': self.eval_wrapper, 'differentiate': self.diff_wrapper, 'reverse': self.reverse_wrapper} self.wrapper = self.factory[mode]", "arguments of compute(variable_vame=value, ...) 
\"\"\" if len(args) == 0: input_dict = kwargs elif", "return np.divide(values[0], values[1]) @node_decorate('differentiate') def diff(self, values, diffs): num = np.multiply(diffs[0], values[1]) -", "var._grad_value return self # Uncomment when overriding: # @node_decorate('evaluate') def eval(self, values): raise", "[child.diff() for child in self.children] result = fn(self, values, diffs) self.set_derivative(result) return result", "\"\"\" Generator to iterate over all variables of this node, which assign seed", "the derivative def set_value(self, value): self._value = None if isinstance(value, np.ndarray): self.set_derivative(np.zeros(value.size)) super().set_value(value)", "or not isinstance(name, str): raise ValueError('Name must be given for variable.') self.name =", "Set variables for evaluation. \"\"\" for key, value in input_dict.items(): self._variables[key].set_value(value) # if", "wrapper(self): # Check that we've received all the dependencies we need if not", "input_dict): \"\"\" Reset vectors of derivatives recursively in children \"\"\" if type(self) !=", "np import numbers from .visualization import create_computational_graph, create_computational_table from .settings import settings \"\"\"", "\"\"\" Wrapper for updating node derivatives. \"\"\" @wraps(fn) def wrapper(self): values = [child.eval()", "from calling nodes directly to compute to treating them as one would expect", "evaluation. \"\"\" for key, value in input_dict.items(): self._variables[key].set_value(value) # if isinstance(value, np.ndarray): #", "all variables necessary in children. 
\"\"\" new_vars = [] for child in self.children:", "output = 'Node(Function = %r, Value = %r, Derivative = %r)' % (self.type,", "= 'Negation' @node_decorate('evaluate') def eval(self, values): return -1*np.array(values[0]) @node_decorate('differentiate') def diff(self, values, diffs):", "denom_out = -1*np.divide(np.multiply(grad_value,numer), np.power(denom, 2)) return (numer_out, denom_out) class Power(Node): def __init__(self): super().__init__()", "do anything, no children @node_decorate('reverse') def reverse(self, values, grad_value): return () class Constant(Node):", "Compute derivatives based on mode if settings.current_mode() == \"forward\": for var in self.iterate_seeds():", "\"\"\" Node representing a symbolic variable. Serves as the basis of evaluation, and", "= -1 def eval(self): if self.value() is None: raise NoValueError('Variable %s has been", "= self.make_node(Division(), self, value) return node def __rtruediv__(self, value): node = self.make_node(Division(), value,", "a numpy array.') self._value = value def set_derivative(self, value): var = self.update_cur_var() if", "== \"forward\": for var in self.iterate_seeds(): self.diff() else: # Reverse mode self.zero_grad_values() #", "exceptions. \"\"\" class NoValueError(Exception): pass class node_decorate(): \"\"\" Decorator for computation functions. Implemented", "reverse(self, values, grad_value): left, right = values left_out = np.multiply(right, grad_value) right_out =", "= value if not isinstance(new, Node): new = cls.make_constant(value) new_nodes.append(new) node.set_children(*new_nodes) node.update_variables() return", "for updating node values. \"\"\" @wraps(fn) def wrapper(self): values = [child.eval() for child", "= self.factory[mode] def __call__(self, fn): return self.wrapper(fn) def eval_wrapper(self, fn): \"\"\" Wrapper for", "variables to compute all partials. 
\"\"\" for var in self._variables: # Reset derivatives", "self._cur_var = None self.children = [] # Name of type of node self.type", "reverse(self, values, grad_value): base, exp = values base_out = np.multiply(np.multiply(exp, np.power(base, exp-1)), grad_value)", "return node def __truediv__(self, value): node = self.make_node(Division(), self, value) return node def", "values, grad_value): return (grad_value, grad_value) class Negation(Node): def __init__(self): super().__init__() self.type = 'Negation'", "in self._variables: # Reset derivatives for v in self._variables: self._variables[v].set_derivative(0) if isinstance(self._variables[var].value(), np.ndarray):", "in self._variables: self._variables[v].set_derivative(0) if isinstance(self._variables[var].value(), np.ndarray): for idx in self._variables[var].iterate_idxs(): yield idx else:", "and getting attributes. \"\"\" def value(self): return self._value def derivative(self): return self._derivative def", "REVERSE MODE Helper functions for properly doing the reverse mode of automatic differentiation.", "elegance tradeoff def set_derivative(self, value): if isinstance(self.value(), np.ndarray): self._derivative[:] = value else: self._derivative", "{} self._cur_var = None self.children = [] # Name of type of node", "dependencies we need if not self.ready_to_reverse(): return # We need to have done", "self._value def derivative(self): return self._derivative def set_value(self, value): if not isinstance(value, (numbers.Number, np.ndarray)):", "case the decorator pattern takes arguments (__call__ is called only once at decoration,", "current variable list to reflect all variables necessary in children. 
\"\"\" new_vars =", "track of whether or not any node is ready to compute its contributions", "np.multiply(np.multiply(exp, np.power(base, exp-1)), grad_value) exp_out = np.multiply(np.multiply(np.log(base), np.power(base, exp)), grad_value) return (base_out, exp_out)", "Constant(value) @classmethod def make_node(cls, node, *values): new_nodes = [] for value in values:", "for reverse pass \"\"\" self._grad_value = 0 self._cur_grad_count = 0 self._grad_count = 0", "numpy as np import numbers from .visualization import create_computational_graph, create_computational_table from .settings import", "base, exp = values base_out = np.multiply(np.multiply(exp, np.power(base, exp-1)), grad_value) exp_out = np.multiply(np.multiply(np.log(base),", "= [child.diff() for child in self.children] result = fn(self, values, diffs) self.set_derivative(result) return", "Methods for handling variables, the basic stores for actually computing the values and", "(grad_value, grad_value) class Negation(Node): def __init__(self): super().__init__() self.type = 'Negation' @node_decorate('evaluate') def eval(self,", "= [] # Name of type of node self.type = 'None' # Reverse", "def set_derivative(self, value): if isinstance(self.value(), np.ndarray): self._derivative[:] = value else: self._derivative = value", "Automatic Differentiation \"\"\" from functools import wraps import numpy as np import numbers", "np.multiply(np.log(temp_base), exp_prime) powered = np.power(base, exp) term2 = np.multiply(coef, powered) return term1+term2 #", "initiated with 0 derivative. 
\"\"\" def __init__(self, value): super().__init__() self.set_value(value) self.set_derivative(0) self.type =", "self._variables: self._derivative[key] = np.zeros(value.size) for node in self.children: node.zero_vector_derivative(input_dict) def update_cur_var(self): for v", "other.derivative() def __ne__(self, other): return not self == other def __hash__(self): return id(self)", "derivatives for v in self._variables: self._variables[v].set_derivative(0) if isinstance(self._variables[var].value(), np.ndarray): for idx in self._variables[var].iterate_idxs():", "dependency counts \"\"\" self._grad_count += 1 for child in self.children: child.set_grad_count() def ready_to_reverse(self):", "node in self.children: node.zero_vector_derivative(input_dict) def update_cur_var(self): for v in self._variables: if np.any(self._variables[v].derivative()): self._cur_var", "values exist values = [child.value() for child in self.children] grad_value = self._grad_value results", "of compute(variable_vame=value, ...) \"\"\" if len(args) == 0: input_dict = kwargs elif len(args)", "self.set_derivative(0) self.type = 'Constant' def set_derivative(self, value): self._derivative = value def eval(self): return", "we need if not self.ready_to_reverse(): return # We need to have done first", "value): node = self.make_node(Division(), value, self) return node def __pow__(self, value): node =", "contributions for reverse pass \"\"\" self._grad_value = 0 self._cur_grad_count = 0 self._grad_count =", "if len(args) == 0: input_dict = kwargs elif len(args) == 1: input_dict =", "automatic differentiation. 
These include keeping track of whether or not any node is", "np.zeros(value.size) self._derivative[self._cur_var][var.var_idx] = value[var.var_idx] def set_children(self, *children): self.children = children \"\"\" VARIABLES Methods", "np.divide(values[0], values[1]) @node_decorate('differentiate') def diff(self, values, diffs): num = np.multiply(diffs[0], values[1]) - np.multiply(values[0],", "to compute its contributions to its children, and managing these contributions. \"\"\" def", "[] for child in self.children: if isinstance(child, Variable): new_vars.append(child) else: new_vars.extend(child._variables.values()) variables =", "def __pow__(self, value): node = self.make_node(Power(), self, value) return node def __rpow__(self, value):", "Derivative = %r)' % (self.type, self.value(), self.derivative()) return output def __add__(self, value): node", "from .settings import settings \"\"\" Custom exceptions. \"\"\" class NoValueError(Exception): pass class node_decorate():", "return output def __add__(self, value): node = self.make_node(Addition(), self, value) return node def", "input_dict.keys() != self._variables.keys(): raise TypeError('Input not recognized.') # Compute the value at this", "then propagates values through the graph to the final output values. \"\"\" def", "[child.eval() for child in self.children] diffs = [child.diff() for child in self.children] result", "<filename>autodiff/node.py \"\"\" Node Logic for Automatic Differentiation \"\"\" from functools import wraps import", "arguments (__call__ is called only once at decoration, since we have another function", "node def __eq__(self,other): return self.value() == other.value() and self.derivative() == other.derivative() def __ne__(self,", "return node def __rpow__(self, value): node = self.make_node(Power(), value, self) return node def", "the reverse mode of automatic differentiation. 
These include keeping track of whether or", "respects order return np.subtract(values[0], values[1]) @node_decorate('differentiate') def diff(self, values, diffs): return np.subtract(diffs[0], diffs[1])", "eval(self, values): return -1*np.array(values[0]) @node_decorate('differentiate') def diff(self, values, diffs): return -1*np.array(diffs[0]) # Reverse", "diff(self, values, diffs): raise NotImplementedError # Uncomment when overriding: # @node_decorate('reverse') def reverse(self,", "\"\"\" Evaluate and differentiate at the given variable values. Inputs methods: -Dictionary of", "if settings.current_mode() == \"forward\": for var in self.iterate_seeds(): self.diff() else: # Reverse mode", "len(args) == 0: input_dict = kwargs elif len(args) == 1: input_dict = args[0]", "self.set_value(value) self.set_derivative(0) self.type = 'Constant' def set_derivative(self, value): self._derivative = value def eval(self):", "@node_decorate('evaluate') def eval(self, values): # values vector respects order return np.subtract(values[0], values[1]) @node_decorate('differentiate')", "fn(self, values) self.set_value(result) return result return wrapper def diff_wrapper(self, fn): \"\"\" Wrapper for", "v in self._variables: self._variables[v].set_derivative(0) if isinstance(self._variables[var].value(), np.ndarray): for idx in self._variables[var].iterate_idxs(): yield idx", "doesn't need to do anything, no children @node_decorate('reverse') def reverse(self, values, grad_value): return", "for child in self.children] grad_value = self._grad_value results = fn(self, values, grad_value) #", "\"\"\" return create_computational_graph(self) def get_comp_table(self): \"\"\" Creates a computational table for a given", "def diff(self, values, diffs): base, exp = values b_prime, exp_prime = diffs #", "NoValueError('Variable %s has been given no value.' 
% self.name) return self.value() def diff(self):", "wrapper(self): values = [child.eval() for child in self.children] diffs = [child.diff() for child", "np.zeros(value.size) self.zero_vector_derivative(input_dict) def zero_vector_derivative(self, input_dict): \"\"\" Reset vectors of derivatives recursively in children", "derivatives of any given node. \"\"\" def update_variables(self): \"\"\" Update current variable list", "reverse(self, values, grad_value): return (grad_value, grad_value) class Negation(Node): def __init__(self): super().__init__() self.type =", "np.copy(base) temp_base[temp_base<=0] = 1 coef = np.multiply(np.log(temp_base), exp_prime) powered = np.power(base, exp) term2", "= np.zeros(value.size) self.zero_vector_derivative(input_dict) def zero_vector_derivative(self, input_dict): \"\"\" Reset vectors of derivatives recursively in", "v in self._variables: if np.any(self._variables[v].derivative()): self._cur_var = v return self._variables[v] def iterate_seeds(self): \"\"\"", "node def __mul__(self, value): node = self.make_node(Multiplication(), self, value) return node def __rmul__(self,", "iterate over all variables of this node, which assign seed values to variables", "'reverse': self.reverse_wrapper} self.wrapper = self.factory[mode] def __call__(self, fn): return self.wrapper(fn) def eval_wrapper(self, fn):", "self._variables = {} self._cur_var = None self.children = [] # Name of type", "in self.children: if isinstance(child, Variable): new_vars.append(child) else: new_vars.extend(child._variables.values()) variables = list(set(new_vars)) variable_names =", "value): node = self.make_node(Addition(), self, value) return node def __radd__(self, value): node =", "vector respects order return np.subtract(values[0], values[1]) @node_decorate('differentiate') def diff(self, values, diffs): return np.subtract(diffs[0],", "values): raise NotImplementedError # Uncomment when overriding: # @node_decorate('differentiate') def diff(self, values, 
diffs):", "node. \"\"\" return create_computational_table(self) \"\"\" SUBCLASSES Node subclasses that define operations or single", "temp_base = np.copy(base) temp_base[temp_base<=0] = 1 coef = np.multiply(np.log(temp_base), exp_prime) powered = np.power(base,", "() class Addition(Node): def __init__(self): super().__init__() self.type = 'Addition' @node_decorate('evaluate') def eval(self, values):", "0: raise ZeroDivisionError('Division by zero.') return np.divide(num, denom) # Reverse mode @node_decorate('reverse') def", "given no value.' % self.name) return self.derivative() # Override dict functionality for variables;", "-1*np.array(values[0]) @node_decorate('differentiate') def diff(self, values, diffs): return -1*np.array(diffs[0]) # Reverse mode @node_decorate('reverse') def", "the value at this node self.set_variables(input_dict) self.eval() # Compute derivatives based on mode", "__pow__(self, value): node = self.make_node(Power(), self, value) return node def __rpow__(self, value): node", "variables)) def set_variables(self, input_dict): \"\"\" Set variables for evaluation. \"\"\" for key, value", "__init__(self): super().__init__() self.type = 'Power' @node_decorate('evaluate') def eval(self, values): base, exp = values", "differentiation. 
These include keeping track of whether or not any node is ready", "np.power(denom, 2)) return (numer_out, denom_out) class Power(Node): def __init__(self): super().__init__() self.type = 'Power'", "self, value) return node def __rsub__(self, value): node = self.make_node(Subtraction(), value, self) return", "node def __pow__(self, value): node = self.make_node(Power(), self, value) return node def __rpow__(self,", "self.type = 'Power' @node_decorate('evaluate') def eval(self, values): base, exp = values return np.power(base,", "other.value() and self.derivative() == other.derivative() def __ne__(self, other): return not self == other", "base, exp = values return np.power(base, exp) @node_decorate('differentiate') def diff(self, values, diffs): base,", "in input_dict.items(): self._variables[key].set_value(value) # if isinstance(value, np.ndarray): # self._derivative[key] = np.zeros(value.size) self.zero_vector_derivative(input_dict) def", "diffs = [child.diff() for child in self.children] result = fn(self, values, diffs) self.set_derivative(result)", ".visualization import create_computational_graph, create_computational_table from .settings import settings \"\"\" Custom exceptions. \"\"\" class", "function metadata (doctstrings, etc.) with wraps self.factory = {'evaluate': self.eval_wrapper, 'differentiate': self.diff_wrapper, 'reverse':", "value[var.var_idx] def set_children(self, *children): self.children = children \"\"\" VARIABLES Methods for handling variables,", "results (functions need to return same # of results as children) for idx", "Check that we've received all the dependencies we need if not self.ready_to_reverse(): return", "yield idx else: self._variables[var].set_derivative(1) yield var \"\"\" REVERSE MODE Helper functions for properly", "counts \"\"\" self._grad_count += 1 for child in self.children: child.set_grad_count() def ready_to_reverse(self): return", "self.wrapper(fn) def eval_wrapper(self, fn): \"\"\" Wrapper for updating node values. 
\"\"\" @wraps(fn) def", "return result return wrapper def diff_wrapper(self, fn): \"\"\" Wrapper for updating node derivatives.", "# @node_decorate('evaluate') def eval(self, values): raise NotImplementedError # Uncomment when overriding: # @node_decorate('differentiate')", "fn(self, values, diffs) self.set_derivative(result) return result return wrapper def reverse_wrapper(self, fn): \"\"\" Wrapper", "derivatives recursively in children \"\"\" if type(self) != Variable: for key, value in", "super().__init__() self.type = 'Subtraction' @node_decorate('evaluate') def eval(self, values): # values vector respects order", "child in self.children] result = fn(self, values) self.set_value(result) return result return wrapper def", "values. \"\"\" @wraps(fn) def wrapper(self): values = [child.eval() for child in self.children] result", "= None self.children = [] # Name of type of node self.type =", "given variable values. Inputs methods: -Dictionary of {variable_name: value, ...} -Keyword arguments of", "def __rtruediv__(self, value): node = self.make_node(Division(), value, self) return node def __pow__(self, value):", "set_children(self, *children): self.children = children \"\"\" VARIABLES Methods for handling variables, the basic", "pass \"\"\" self._grad_value = 0 self._cur_grad_count = 0 self._grad_count = 0 for child", "no value.' 
% self.name) return self.value() def diff(self): if self.derivative() is None: raise", "We need to have done first sweep before reverse, assume values exist values", "values, grad_value): left, right = values left_out = np.multiply(right, grad_value) right_out = np.multiply(left,", "# Second term term2 = 0 # if exp_prime != 0: # Compute", "= 1 yield i # # Override calling the variable def compute(self, *args,", "zero.') return np.divide(num, denom) # Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): numer,", "values[1]) - np.multiply(values[0], diffs[1]) denom = np.array(values[1])**2 if denom == 0: raise ZeroDivisionError('Division", "return self.value() == other.value() and self.derivative() == other.derivative() def __ne__(self, other): return not", "\"\"\" Wrapper for updating node values. \"\"\" @wraps(fn) def wrapper(self): values = [child.eval()", "Variable(Node): \"\"\" Node representing a symbolic variable. Serves as the basis of evaluation,", "var in self._variables: # Reset derivatives for v in self._variables: self._variables[v].set_derivative(0) if isinstance(self._variables[var].value(),", "denom = values numer_out = np.divide(grad_value, denom) denom_out = -1*np.divide(np.multiply(grad_value,numer), np.power(denom, 2)) return", "value in input_dict.items(): if isinstance(value, np.ndarray) and key in self._variables: self._derivative[key] = np.zeros(value.size)", "variable_names = [var.name for var in variables] self._variables = dict(zip(variable_names, variables)) def set_variables(self,", "in self._variables: if np.any(self._variables[v].derivative()): self._cur_var = v return self._variables[v] def iterate_seeds(self): \"\"\" Generator", "proper contribution counts self.set_grad_count() # Seeding output, current node by 1 self.add_grad_contribution(1) self.reverse()", "for key, value in input_dict.items(): if isinstance(value, np.ndarray) and key in self._variables: self._derivative[key]", "node, which assign seed 
values to variables to compute all partials. \"\"\" for", "%s has been given no value.' % self.name) return self.derivative() # Override dict", "class node_decorate(): \"\"\" Decorator for computation functions. Implemented as a class for clarity", "% self.name) return self.derivative() # Override dict functionality for variables; I could keep", "self.zero_grad_values() # Get proper contribution counts self.set_grad_count() # Seeding output, current node by", "exp_prime = diffs # First term coef = np.multiply(exp, b_prime) powered = np.power(base,", "no value.' % self.name) return self.derivative() # Override dict functionality for variables; I", "= fn(self, values, grad_value) # Need to propagate results (functions need to return", "# Override dict functionality for variables; I could keep this # consistent, but", "self) return node def __sub__(self, value): node = self.make_node(Subtraction(), self, value) return node", "self._variables: self._variables[v].set_derivative(0) if isinstance(self._variables[var].value(), np.ndarray): for idx in self._variables[var].iterate_idxs(): yield idx else: self._variables[var].set_derivative(1)", "def diff(self, values, diffs): raise NotImplementedError # Uncomment when overriding: # @node_decorate('reverse') def", "def __init__(self): super().__init__() self.type = 'Negation' @node_decorate('evaluate') def eval(self, values): return -1*np.array(values[0]) @node_decorate('differentiate')", "Value = %r, Derivative = %r)' % (self.type, self.value(), self.derivative()) return output def", "\"\"\" Reset vectors of derivatives recursively in children \"\"\" if type(self) != Variable:", "!= self._variables.keys(): raise TypeError('Input not recognized.') # Compute the value at this node", "in self.children: child.set_grad_count() def ready_to_reverse(self): return (self._cur_grad_count == self._grad_count) def add_grad_contribution(self, value): #", "{'evaluate': self.eval_wrapper, 'differentiate': self.diff_wrapper, 'reverse': 
self.reverse_wrapper} self.wrapper = self.factory[mode] def __call__(self, fn): return", "@node_decorate('evaluate') def eval(self, values): if values[1] == 0: raise ZeroDivisionError('Division by zero.') return", "values upon computation. \"\"\" def compute(self, *args, **kwargs): \"\"\" Evaluate and differentiate at", "child in self.children: child.zero_grad_values() def set_grad_count(self): \"\"\" Calculate dependency counts \"\"\" self._grad_count +=", "variable.') self.name = name self.type = 'Variable' self._variables[name] = self self.var_idx = -1", "@node_decorate('evaluate') def eval(self, values): return np.multiply(values[0], values[1]) @node_decorate('differentiate') def diff(self, values, diffs): return", "**kwargs): \"\"\" Evaluate and differentiate at the given variable values. Inputs methods: -Dictionary", "term term2 = 0 # if exp_prime != 0: # Compute only if", "np.multiply(diffs[0], values[1]) + np.multiply(diffs[1], values[0]) # Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value):", "self.children] result = fn(self, values) self.set_value(result) return result return wrapper def diff_wrapper(self, fn):", "0 self._grad_count = 0 @classmethod def make_constant(cls, value): return Constant(value) @classmethod def make_node(cls,", "if isinstance(value, np.ndarray): # self._derivative[key] = np.zeros(value.size) self.zero_vector_derivative(input_dict) def zero_vector_derivative(self, input_dict): \"\"\" Reset", "= diffs # First term coef = np.multiply(exp, b_prime) powered = np.power(base, np.subtract(exp,", "= v return self._variables[v] def iterate_seeds(self): \"\"\" Generator to iterate over all variables", "dict(zip(variable_names, variables)) def set_variables(self, input_dict): \"\"\" Set variables for evaluation. 
\"\"\" for key,", "base, exp = values b_prime, exp_prime = diffs # First term coef =", "if isinstance(self.value(), np.ndarray): self._derivative[:] = value else: self._derivative = value # On value", "before reverse, assume values exist values = [child.value() for child in self.children] grad_value", "self._derivative[key] = np.zeros(value.size) for node in self.children: node.zero_vector_derivative(input_dict) def update_cur_var(self): for v in", "in self.iterate_seeds(): self.diff() else: # Reverse mode self.zero_grad_values() # Get proper contribution counts", "def update_variables(self): \"\"\" Update current variable list to reflect all variables necessary in", "assume values exist values = [child.value() for child in self.children] grad_value = self._grad_value", "Generator to iterate over all variables of this node, which assign seed values", "self.children] result = fn(self, values, diffs) self.set_derivative(result) return result return wrapper def reverse_wrapper(self,", "\"\"\" Node Logic for Automatic Differentiation \"\"\" from functools import wraps import numpy", "Seeding output, current node by 1 self.add_grad_contribution(1) self.reverse() # Now set the results", "-grad_value) class Multiplication(Node): def __init__(self): super().__init__() self.type = 'Multiplication' @node_decorate('evaluate') def eval(self, values):", "values return np.add(left, right) @node_decorate('differentiate') def diff(self, values, diffs): left, right = diffs", "in the case the decorator pattern takes arguments (__call__ is called only once", "denom) # Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): numer, denom = values", "needs to set the derivative def set_value(self, value): self._value = None if isinstance(value,", "value): # Keep track of addition contribution self._cur_grad_count += 1 self._grad_value += value", "num = np.multiply(diffs[0], values[1]) - np.multiply(values[0], diffs[1]) denom = np.array(values[1])**2 if denom ==", 
"return node def __rmul__(self, value): node = self.make_node(Multiplication(), value, self) return node def", "@wraps(fn) def wrapper(self): # Check that we've received all the dependencies we need", "Note: the class implementation of decorators behaves very differently in the case the", "assign seed values to variables to compute all partials. \"\"\" for var in", "eval and diff to be implemented by subclasses. Use the node_decorate decorator to", "right) @node_decorate('differentiate') def diff(self, values, diffs): left, right = diffs return np.add(left, right)", "self.derivative() # Reverse mode doesn't need to do anything, no children @node_decorate('reverse') def", "takes arguments (__call__ is called only once at decoration, since we have another", "= None self._derivative = {} self._variables = {} self._cur_var = None self.children =", "value.' % self.name) return self.value() def diff(self): if self.derivative() is None: raise NoValueError('Variable", "0 @classmethod def make_constant(cls, value): return Constant(value) @classmethod def make_node(cls, node, *values): new_nodes", "= args[0] if self.name not in input_dict: raise TypeError('Input not recognized.') self.set_value(input_dict[self.name]) self.set_derivative(1);", "super().__init__() self.type = 'Addition' @node_decorate('evaluate') def eval(self, values): left, right = values return", "class Division(Node): def __init__(self): super().__init__() self.type = 'Division' @node_decorate('evaluate') def eval(self, values): if", "def __init__(self): super().__init__() self.type = 'Power' @node_decorate('evaluate') def eval(self, values): base, exp =", "= self.make_node(Multiplication(), value, self) return node def __truediv__(self, value): node = self.make_node(Division(), self,", "= np.zeros(value.size) for node in self.children: node.zero_vector_derivative(input_dict) def update_cur_var(self): for v in self._variables:", "= 0 self._grad_count = 0 @classmethod def make_constant(cls, value): return 
Constant(value) @classmethod def", "MAGIC Various implementations to improve the interface of the package, from calling nodes", "left_out = np.multiply(right, grad_value) right_out = np.multiply(left, grad_value) return (left_out, right_out) class Division(Node):", "== 0: raise ZeroDivisionError('Division by zero.') return np.divide(num, denom) # Reverse mode @node_decorate('reverse')", "value in values: new = value if not isinstance(new, Node): new = cls.make_constant(value)", "0 self._cur_grad_count = 0 self._grad_count = 0 @classmethod def make_constant(cls, value): return Constant(value)", "None: raise NoValueError('Variable %s has been given no value.' % self.name) return self.value()", "self) return node def __neg__(self): node = self.make_node(Negation(), self) return node def __sub__(self,", "for idx in range(len(results)): self.children[idx].add_grad_contribution(results[idx]) self.children[idx].reverse() return results return wrapper class Node(): \"\"\"", "\"\"\" Wrapper for updating gradients in reverse pass. \"\"\" @wraps(fn) def wrapper(self): #", "must be numeric or a numpy array.') self._value = value def set_derivative(self, value):", "table for a given node. \"\"\" return create_computational_table(self) \"\"\" SUBCLASSES Node subclasses that", "np.multiply(diffs[1], values[0]) # Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): left, right =", "node values upon computation. \"\"\" def compute(self, *args, **kwargs): \"\"\" Evaluate and differentiate", "computation. \"\"\" def compute(self, *args, **kwargs): \"\"\" Evaluate and differentiate at the given", "kwargs elif len(args) == 1: input_dict = args[0] if self.name not in input_dict:", "constant. Always initiated with 0 derivative. 
\"\"\" def __init__(self, value): super().__init__() self.set_value(value) self.set_derivative(0)", "Addition(Node): def __init__(self): super().__init__() self.type = 'Addition' @node_decorate('evaluate') def eval(self, values): left, right", "coef = np.multiply(exp, b_prime) powered = np.power(base, np.subtract(exp, 1)) term1 = np.multiply(coef, powered)", "exist values = [child.value() for child in self.children] grad_value = self._grad_value results =", "return # We need to have done first sweep before reverse, assume values", "def __repr__(self): output = 'Node(Function = %r, Value = %r, Derivative = %r)'", "as the basis of evaluation, and then propagates values through the graph to", "pattern takes arguments (__call__ is called only once at decoration, since we have", "reverse(self, values, grad_value): numer, denom = values numer_out = np.divide(grad_value, denom) denom_out =", "@node_decorate('reverse') def reverse(self, values, grad_value): numer, denom = values numer_out = np.divide(grad_value, denom)", "return same # of results as children) for idx in range(len(results)): self.children[idx].add_grad_contribution(results[idx]) self.children[idx].reverse()", "= value def eval(self): return self.value() def diff(self): return self.derivative() # Reverse mode", "return self.compute(*args, **kwargs) # Reverse mode doesn't need to do anything, no children", "if self.name not in input_dict: raise TypeError('Input not recognized.') self.set_value(input_dict[self.name]) self.set_derivative(1); return self", "to improve the interface of the package, from calling nodes directly to compute", "self._cur_var not in self._derivative: # self._derivative[self._cur_var] = np.zeros(value.size) self._derivative[self._cur_var][var.var_idx] = value[var.var_idx] def set_children(self,", "return self.wrapper(fn) def eval_wrapper(self, fn): \"\"\" Wrapper for updating node values. \"\"\" @wraps(fn)", "since we have another function layer outside now). 
\"\"\" def __init__(self, mode): #", "in input_dict.items(): if isinstance(value, np.ndarray) and key in self._variables: self._derivative[key] = np.zeros(value.size) for", "overriding: # @node_decorate('reverse') def reverse(self, values, grad_value): raise NotImplementedError def get_comp_graph(self): \"\"\" Creates", "variable values. Inputs methods: -Dictionary of {variable_name: value, ...} -Keyword arguments of compute(variable_vame=value,", "isinstance(name, str): raise ValueError('Name must be given for variable.') self.name = name self.type", "value, self) return node def __eq__(self,other): return self.value() == other.value() and self.derivative() ==", "= self.make_node(Division(), value, self) return node def __pow__(self, value): node = self.make_node(Power(), self,", "list to reflect all variables necessary in children. \"\"\" new_vars = [] for", "not isinstance(name, str): raise ValueError('Name must be given for variable.') self.name = name", "# Iterate over each vector position def iterate_idxs(self): for i in range(self._value.size): self.set_derivative(0)", "elif len(args) == 1: input_dict = args[0] if self.name not in input_dict: raise", "numer, denom = values numer_out = np.divide(grad_value, denom) denom_out = -1*np.divide(np.multiply(grad_value,numer), np.power(denom, 2))", "self.children[idx].add_grad_contribution(results[idx]) self.children[idx].reverse() return results return wrapper class Node(): \"\"\" Class Node Base Node", "self.make_node(Subtraction(), self, value) return node def __rsub__(self, value): node = self.make_node(Subtraction(), value, self)", "__mul__(self, value): node = self.make_node(Multiplication(), self, value) return node def __rmul__(self, value): node", "self.compute(*args, **kwargs) # Reverse mode doesn't need to do anything, no children @node_decorate('reverse')", "# @node_decorate('differentiate') def diff(self, values, diffs): raise NotImplementedError # Uncomment when overriding: #", "TypeError('Input not 
recognized.') self.set_value(input_dict[self.name]) self.set_derivative(1); return self def __call__(self, *args, **kwargs): return self.compute(*args,", "values, diffs): left, right = diffs return np.add(left, right) # Reverse mode @node_decorate('reverse')", "np.power(base, exp) term2 = np.multiply(coef, powered) return term1+term2 # Reverse mode @node_decorate('reverse') def", "and self.derivative() == other.derivative() def __ne__(self, other): return not self == other def", "def __sub__(self, value): node = self.make_node(Subtraction(), self, value) return node def __rsub__(self, value):", "calling the variable def compute(self, *args, **kwargs): if len(args) == 0: input_dict =", "variables] self._variables = dict(zip(variable_names, variables)) def set_variables(self, input_dict): \"\"\" Set variables for evaluation.", "i in range(self._value.size): self.set_derivative(0) self.var_idx = i self._derivative[i] = 1 yield i #", "= value else: # if self._cur_var not in self._derivative: # self._derivative[self._cur_var] = np.zeros(value.size)", "value def eval(self): return self.value() def diff(self): return self.derivative() # Reverse mode doesn't", "get_comp_table(self): \"\"\" Creates a computational table for a given node. \"\"\" return create_computational_table(self)", "compute(variable_vame=value, ...) \"\"\" if len(args) == 0: input_dict = kwargs elif len(args) ==", "def eval_wrapper(self, fn): \"\"\" Wrapper for updating node values. 
\"\"\" @wraps(fn) def wrapper(self):", "Reset derivatives for v in self._variables: self._variables[v].set_derivative(0) if isinstance(self._variables[var].value(), np.ndarray): for idx in", "value): node = self.make_node(Subtraction(), self, value) return node def __rsub__(self, value): node =", "= np.multiply(coef, powered) # Second term term2 = 0 # if exp_prime !=", "@wraps(fn) def wrapper(self): values = [child.eval() for child in self.children] diffs = [child.diff()", "b_prime, exp_prime = diffs # First term coef = np.multiply(exp, b_prime) powered =", "# Reverse mode self._grad_value = 0 self._cur_grad_count = 0 self._grad_count = 0 @classmethod", "values, diffs): num = np.multiply(diffs[0], values[1]) - np.multiply(values[0], diffs[1]) denom = np.array(values[1])**2 if", "node def __sub__(self, value): node = self.make_node(Subtraction(), self, value) return node def __rsub__(self,", "__sub__(self, value): node = self.make_node(Subtraction(), self, value) return node def __rsub__(self, value): node", "= values return np.add(left, right) @node_decorate('differentiate') def diff(self, values, diffs): left, right =", "eval(self, values): raise NotImplementedError # Uncomment when overriding: # @node_decorate('differentiate') def diff(self, values,", "recursively in children \"\"\" if type(self) != Variable: for key, value in input_dict.items():", "[child.eval() for child in self.children] result = fn(self, values) self.set_value(result) return result return", "# Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): return (grad_value, -grad_value) class Multiplication(Node):", "value) return node def __rsub__(self, value): node = self.make_node(Subtraction(), value, self) return node", "# Uncomment when overriding: # @node_decorate('evaluate') def eval(self, values): raise NotImplementedError # Uncomment", "def diff(self, values, diffs): return np.multiply(diffs[0], values[1]) + np.multiply(diffs[1], values[0]) # Reverse mode", "= 
[child.value() for child in self.children] grad_value = self._grad_value results = fn(self, values,", "raise ValueError('Name must be given for variable.') self.name = name self.type = 'Variable'", "= i self._derivative[i] = 1 yield i # # Override calling the variable", "recognized.') # Compute the value at this node self.set_variables(input_dict) self.eval() # Compute derivatives", "for key, value in input_dict.items(): self._variables[key].set_value(value) # if isinstance(value, np.ndarray): # self._derivative[key] =", "over all variables of this node, which assign seed values to variables to", "return (grad_value, -grad_value) class Multiplication(Node): def __init__(self): super().__init__() self.type = 'Multiplication' @node_decorate('evaluate') def", "iterate_seeds(self): \"\"\" Generator to iterate over all variables of this node, which assign", "*args, **kwargs): return self.compute(*args, **kwargs) def __repr__(self): output = 'Node(Function = %r, Value", "if np.any(self._variables[v].derivative()): self._cur_var = v return self._variables[v] def iterate_seeds(self): \"\"\" Generator to iterate", "def __call__(self, *args, **kwargs): return self.compute(*args, **kwargs) def __repr__(self): output = 'Node(Function =", "basic stores for actually computing the values and derivatives of any given node.", "Iterate over each vector position def iterate_idxs(self): for i in range(self._value.size): self.set_derivative(0) self.var_idx", "create_computational_graph(self) def get_comp_table(self): \"\"\" Creates a computational table for a given node. 
\"\"\"", "term2 = np.multiply(coef, powered) return term1+term2 # Reverse mode @node_decorate('reverse') def reverse(self, values,", "self.name not in input_dict: raise TypeError('Input not recognized.') self.set_value(input_dict[self.name]) self.set_derivative(1); return self def", "values[1]) @node_decorate('differentiate') def diff(self, values, diffs): num = np.multiply(diffs[0], values[1]) - np.multiply(values[0], diffs[1])", "\"\"\" @wraps(fn) def wrapper(self): values = [child.eval() for child in self.children] diffs =", "def eval(self): return self.value() def diff(self): return self.derivative() # Reverse mode doesn't need", "self.make_node(Subtraction(), value, self) return node def __mul__(self, value): node = self.make_node(Multiplication(), self, value)", "value def set_derivative(self, value): var = self.update_cur_var() if isinstance(value, numbers.Number): self._derivative[self._cur_var] = value", "mode of automatic differentiation. These include keeping track of whether or not any", "super().__init__() self.type = 'Negation' @node_decorate('evaluate') def eval(self, values): return -1*np.array(values[0]) @node_decorate('differentiate') def diff(self,", "node = self.make_node(Addition(), value, self) return node def __neg__(self): node = self.make_node(Negation(), self)", "self.type = 'None' # Reverse mode self._grad_value = 0 self._cur_grad_count = 0 self._grad_count", "1 coef = np.multiply(np.log(temp_base), exp_prime) powered = np.power(base, exp) term2 = np.multiply(coef, powered)", "'None' # Reverse mode self._grad_value = 0 self._cur_grad_count = 0 self._grad_count = 0", "given for variable.') self.name = name self.type = 'Variable' self._variables[name] = self self.var_idx", "numeric or a numpy array.') self._value = value def set_derivative(self, value): var =", "\"\"\" Node representing a constant. Always initiated with 0 derivative. 
\"\"\" def __init__(self,", "return np.multiply(values[0], values[1]) @node_decorate('differentiate') def diff(self, values, diffs): return np.multiply(diffs[0], values[1]) + np.multiply(diffs[1],", "Actual computation functions, with eval and diff to be implemented by subclasses. Use", "VARIABLES Methods for handling variables, the basic stores for actually computing the values", "= {} self._cur_var = None self.children = [] # Name of type of", "'differentiate': self.diff_wrapper, 'reverse': self.reverse_wrapper} self.wrapper = self.factory[mode] def __call__(self, fn): return self.wrapper(fn) def", "upon computation. \"\"\" def compute(self, *args, **kwargs): \"\"\" Evaluate and differentiate at the", "Override calling the variable def compute(self, *args, **kwargs): if len(args) == 0: input_dict", "log(-c) issues temp_base = np.copy(base) temp_base[temp_base<=0] = 1 coef = np.multiply(np.log(temp_base), exp_prime) powered", "return self.value() def diff(self): return self.derivative() # Reverse mode doesn't need to do", "return node def __rtruediv__(self, value): node = self.make_node(Division(), value, self) return node def", "...) \"\"\" if len(args) == 0: input_dict = kwargs elif len(args) == 1:", "\"\"\" if type(self) != Variable: for key, value in input_dict.items(): if isinstance(value, np.ndarray)", "self._grad_value results = fn(self, values, grad_value) # Need to propagate results (functions need", "Use the node_decorate decorator to update node values upon computation. 
\"\"\" def compute(self,", "return node def __radd__(self, value): node = self.make_node(Addition(), value, self) return node def", "= self.make_node(Power(), self, value) return node def __rpow__(self, value): node = self.make_node(Power(), value,", "if denom == 0: raise ZeroDivisionError('Division by zero.') return np.divide(num, denom) # Reverse", "# # Override calling the variable def compute(self, *args, **kwargs): if len(args) ==", "return results return wrapper class Node(): \"\"\" Class Node Base Node implementation. \"\"\"", "self._cur_grad_count += 1 self._grad_value += value \"\"\" COMPUTATION Actual computation functions, with eval", "def __init__(self): self._value = None self._derivative = {} self._variables = {} self._cur_var =", "class NoValueError(Exception): pass class node_decorate(): \"\"\" Decorator for computation functions. Implemented as a", "np.subtract(exp, 1)) term1 = np.multiply(coef, powered) # Second term term2 = 0 #", "self.children] diffs = [child.diff() for child in self.children] result = fn(self, values, diffs)", "update_variables(self): \"\"\" Update current variable list to reflect all variables necessary in children.", "= kwargs elif len(args) == 1: input_dict = args[0] if input_dict.keys() != self._variables.keys():", "= 0 self._grad_count = 0 for child in self.children: child.zero_grad_values() def set_grad_count(self): \"\"\"", "self._derivative[self._cur_var][var.var_idx] = value[var.var_idx] def set_children(self, *children): self.children = children \"\"\" VARIABLES Methods for", "@classmethod def make_node(cls, node, *values): new_nodes = [] for value in values: new", "import numpy as np import numbers from .visualization import create_computational_graph, create_computational_table from .settings", "temp_base[temp_base<=0] = 1 coef = np.multiply(np.log(temp_base), exp_prime) powered = np.power(base, exp) term2 =", "to update node values upon computation. 
\"\"\" def compute(self, *args, **kwargs): \"\"\" Evaluate", "class for clarity and to serve as a decorator factory. Note: the class", "if exp_prime != 0: # Compute only if necessary, otherwise we run into", "grad_value) right_out = np.multiply(left, grad_value) return (left_out, right_out) class Division(Node): def __init__(self): super().__init__()", "'Negation' @node_decorate('evaluate') def eval(self, values): return -1*np.array(values[0]) @node_decorate('differentiate') def diff(self, values, diffs): return", "behaves very differently in the case the decorator pattern takes arguments (__call__ is", "for variable.') self.name = name self.type = 'Variable' self._variables[name] = self self.var_idx =", "with eval and diff to be implemented by subclasses. Use the node_decorate decorator", "diff(self, values, diffs): num = np.multiply(diffs[0], values[1]) - np.multiply(values[0], diffs[1]) denom = np.array(values[1])**2", "diffs): return np.multiply(diffs[0], values[1]) + np.multiply(diffs[1], values[0]) # Reverse mode @node_decorate('reverse') def reverse(self,", "need to do anything, no children @node_decorate('reverse') def reverse(self, values, grad_value): return ()", "self._value = None if isinstance(value, np.ndarray): self.set_derivative(np.zeros(value.size)) super().set_value(value) # Iterate over each vector", "variables and constants. \"\"\" class Variable(Node): \"\"\" Node representing a symbolic variable. Serves", "been given no value.' 
% self.name) return self.value() def diff(self): if self.derivative() is", "def diff(self, values, diffs): return np.subtract(diffs[0], diffs[1]) # Reverse mode @node_decorate('reverse') def reverse(self,", "node = self.make_node(Subtraction(), self, value) return node def __rsub__(self, value): node = self.make_node(Subtraction(),", "'Constant' def set_derivative(self, value): self._derivative = value def eval(self): return self.value() def diff(self):", "or not any node is ready to compute its contributions to its children,", "= value # On value set, needs to set the derivative def set_value(self,", "diffs): num = np.multiply(diffs[0], values[1]) - np.multiply(values[0], diffs[1]) denom = np.array(values[1])**2 if denom", "self.name = name self.type = 'Variable' self._variables[name] = self self.var_idx = -1 def", "grad_value) # Need to propagate results (functions need to return same # of", "len(args) == 1: input_dict = args[0] if self.name not in input_dict: raise TypeError('Input", "\"\"\" def __init__(self, mode): # Maintain function metadata (doctstrings, etc.) with wraps self.factory", "%r)' % (self.type, self.value(), self.derivative()) return output def __add__(self, value): node = self.make_node(Addition(),", "= fn(self, values) self.set_value(result) return result return wrapper def diff_wrapper(self, fn): \"\"\" Wrapper", "__init__(self, value): super().__init__() self.set_value(value) self.set_derivative(0) self.type = 'Constant' def set_derivative(self, value): self._derivative =", "properly doing the reverse mode of automatic differentiation. 
These include keeping track of", "# Override calling the variable def compute(self, *args, **kwargs): if len(args) == 0:", "mode @node_decorate('reverse') def reverse(self, values, grad_value): return (grad_value, -grad_value) class Multiplication(Node): def __init__(self):", "= np.divide(grad_value, denom) denom_out = -1*np.divide(np.multiply(grad_value,numer), np.power(denom, 2)) return (numer_out, denom_out) class Power(Node):", "set_value(self, value): self._value = None if isinstance(value, np.ndarray): self.set_derivative(np.zeros(value.size)) super().set_value(value) # Iterate over", "@node_decorate('reverse') def reverse(self, values, grad_value): raise NotImplementedError def get_comp_graph(self): \"\"\" Creates a computational", "overriding: # @node_decorate('evaluate') def eval(self, values): raise NotImplementedError # Uncomment when overriding: #", "*children): self.children = children \"\"\" VARIABLES Methods for handling variables, the basic stores", "variables = list(set(new_vars)) variable_names = [var.name for var in variables] self._variables = dict(zip(variable_names,", "@node_decorate('differentiate') def diff(self, values, diffs): return -1*np.array(diffs[0]) # Reverse mode @node_decorate('reverse') def reverse(self,", "propagates values through the graph to the final output values. \"\"\" def __init__(self,", "ZeroDivisionError('Division by zero.') return np.divide(num, denom) # Reverse mode @node_decorate('reverse') def reverse(self, values,", "class Constant(Node): \"\"\" Node representing a constant. Always initiated with 0 derivative. \"\"\"", "Decorator for computation functions. 
Implemented as a class for clarity and to serve", "we run into log(-c) issues temp_base = np.copy(base) temp_base[temp_base<=0] = 1 coef =", "return node def __pow__(self, value): node = self.make_node(Power(), self, value) return node def", "if isinstance(value, np.ndarray) and key in self._variables: self._derivative[key] = np.zeros(value.size) for node in", "(self.type, self.value(), self.derivative()) return output def __add__(self, value): node = self.make_node(Addition(), self, value)", "them as one would expect in symbolic computation. \"\"\" def __call__(self, *args, **kwargs):", "class Variable(Node): \"\"\" Node representing a symbolic variable. Serves as the basis of", "diffs) self.set_derivative(result) return result return wrapper def reverse_wrapper(self, fn): \"\"\" Wrapper for updating", "= values b_prime, exp_prime = diffs # First term coef = np.multiply(exp, b_prime)", "Helper functions for properly doing the reverse mode of automatic differentiation. These include", "be implemented by subclasses. Use the node_decorate decorator to update node values upon", "not isinstance(new, Node): new = cls.make_constant(value) new_nodes.append(new) node.set_children(*new_nodes) node.update_variables() return node \"\"\" MAGIC", "values base_out = np.multiply(np.multiply(exp, np.power(base, exp-1)), grad_value) exp_out = np.multiply(np.multiply(np.log(base), np.power(base, exp)), grad_value)", "@node_decorate('reverse') def reverse(self, values, grad_value): return () class Addition(Node): def __init__(self): super().__init__() self.type", "pass. \"\"\" @wraps(fn) def wrapper(self): # Check that we've received all the dependencies", "\"\"\" def update_variables(self): \"\"\" Update current variable list to reflect all variables necessary", "compute(self, *args, **kwargs): \"\"\" Evaluate and differentiate at the given variable values. 
Inputs", "self.value() def diff(self): return self.derivative() # Reverse mode doesn't need to do anything,", "{variable_name: value, ...} -Keyword arguments of compute(variable_vame=value, ...) \"\"\" if len(args) == 0:", "for variables; I could keep this # consistent, but would increase computation; elegance", "for actually computing the values and derivatives of any given node. \"\"\" def", "values. \"\"\" def __init__(self, name=None): super().__init__() if name is None or not isinstance(name,", "is ready to compute its contributions to its children, and managing these contributions.", "values, grad_value): return () class Addition(Node): def __init__(self): super().__init__() self.type = 'Addition' @node_decorate('evaluate')", "node_decorate(): \"\"\" Decorator for computation functions. Implemented as a class for clarity and", "in values: new = value if not isinstance(new, Node): new = cls.make_constant(value) new_nodes.append(new)", "1)) term1 = np.multiply(coef, powered) # Second term term2 = 0 # if", "= %r, Value = %r, Derivative = %r)' % (self.type, self.value(), self.derivative()) return", "only once at decoration, since we have another function layer outside now). 
\"\"\"", "grad_value): base, exp = values base_out = np.multiply(np.multiply(exp, np.power(base, exp-1)), grad_value) exp_out =", "np.power(base, np.subtract(exp, 1)) term1 = np.multiply(coef, powered) # Second term term2 = 0", "= %r, Derivative = %r)' % (self.type, self.value(), self.derivative()) return output def __add__(self,", "return self def __call__(self, *args, **kwargs): return self.compute(*args, **kwargs) # Reverse mode doesn't", "= var._grad_value return self # Uncomment when overriding: # @node_decorate('evaluate') def eval(self, values):", "def diff(self, values, diffs): left, right = diffs return np.add(left, right) # Reverse", "= self.make_node(Multiplication(), self, value) return node def __rmul__(self, value): node = self.make_node(Multiplication(), value,", "one would expect in symbolic computation. \"\"\" def __call__(self, *args, **kwargs): return self.compute(*args,", "Now set the results self._derivative = {} for key, var in self._variables.items(): self._derivative[key]", "Node): new = cls.make_constant(value) new_nodes.append(new) node.set_children(*new_nodes) node.update_variables() return node \"\"\" MAGIC Various implementations", "self.type = 'Negation' @node_decorate('evaluate') def eval(self, values): return -1*np.array(values[0]) @node_decorate('differentiate') def diff(self, values,", "\"\"\" def __init__(self, value): super().__init__() self.set_value(value) self.set_derivative(0) self.type = 'Constant' def set_derivative(self, value):", "@wraps(fn) def wrapper(self): values = [child.eval() for child in self.children] result = fn(self,", "def eval(self, values): # values vector respects order return np.subtract(values[0], values[1]) @node_decorate('differentiate') def", "decorators behaves very differently in the case the decorator pattern takes arguments (__call__", "computation. 
\"\"\" def __call__(self, *args, **kwargs): return self.compute(*args, **kwargs) def __repr__(self): output =", "self.derivative() is None: raise NoValueError('Variable %s has been given no value.' % self.name)", "reverse pass. \"\"\" @wraps(fn) def wrapper(self): # Check that we've received all the", "child in self.children] result = fn(self, values, diffs) self.set_derivative(result) return result return wrapper", "input_dict = kwargs elif len(args) == 1: input_dict = args[0] if input_dict.keys() !=", "__init__(self): super().__init__() self.type = 'Multiplication' @node_decorate('evaluate') def eval(self, values): return np.multiply(values[0], values[1]) @node_decorate('differentiate')", "= self.make_node(Power(), value, self) return node def __eq__(self,other): return self.value() == other.value() and", "= np.multiply(coef, powered) return term1+term2 # Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value):", "= None if isinstance(value, np.ndarray): self.set_derivative(np.zeros(value.size)) super().set_value(value) # Iterate over each vector position", "*args, **kwargs): \"\"\" Evaluate and differentiate at the given variable values. Inputs methods:", "isinstance(self.value(), np.ndarray): self._derivative[:] = value else: self._derivative = value # On value set,", "Evaluate and differentiate at the given variable values. Inputs methods: -Dictionary of {variable_name:", "wrapper(self): values = [child.eval() for child in self.children] result = fn(self, values) self.set_value(result)", "self._derivative: # self._derivative[self._cur_var] = np.zeros(value.size) self._derivative[self._cur_var][var.var_idx] = value[var.var_idx] def set_children(self, *children): self.children =", "through the graph to the final output values. \"\"\" def __init__(self, name=None): super().__init__()", "and managing these contributions. 
\"\"\" def zero_grad_values(self): \"\"\" Reset all partial contributions for", "def wrapper(self): values = [child.eval() for child in self.children] diffs = [child.diff() for", "\"\"\" new_vars = [] for child in self.children: if isinstance(child, Variable): new_vars.append(child) else:", "'Multiplication' @node_decorate('evaluate') def eval(self, values): return np.multiply(values[0], values[1]) @node_decorate('differentiate') def diff(self, values, diffs):", "functools import wraps import numpy as np import numbers from .visualization import create_computational_graph,", "self._derivative[self._cur_var] = value else: # if self._cur_var not in self._derivative: # self._derivative[self._cur_var] =", "0: raise ZeroDivisionError('Division by zero.') return np.divide(values[0], values[1]) @node_decorate('differentiate') def diff(self, values, diffs):", "= values left_out = np.multiply(right, grad_value) right_out = np.multiply(left, grad_value) return (left_out, right_out)", "# Seeding output, current node by 1 self.add_grad_contribution(1) self.reverse() # Now set the", "self.make_node(Addition(), value, self) return node def __neg__(self): node = self.make_node(Negation(), self) return node", "set_derivative(self, value): self._derivative = value def eval(self): return self.value() def diff(self): return self.derivative()", "[child.value() for child in self.children] grad_value = self._grad_value results = fn(self, values, grad_value)", "\"\"\" return create_computational_table(self) \"\"\" SUBCLASSES Node subclasses that define operations or single values,", "the dependencies we need if not self.ready_to_reverse(): return # We need to have", "Variable: for key, value in input_dict.items(): if isinstance(value, np.ndarray) and key in self._variables:", "isinstance(new, Node): new = cls.make_constant(value) new_nodes.append(new) node.set_children(*new_nodes) node.update_variables() return node \"\"\" MAGIC Various", "kwargs elif len(args) == 1: input_dict = args[0] 
if input_dict.keys() != self._variables.keys(): raise", "a decorator factory. Note: the class implementation of decorators behaves very differently in", "+= value \"\"\" COMPUTATION Actual computation functions, with eval and diff to be", "def eval(self, values): raise NotImplementedError # Uncomment when overriding: # @node_decorate('differentiate') def diff(self,", "value): node = self.make_node(Division(), self, value) return node def __rtruediv__(self, value): node =", "any node is ready to compute its contributions to its children, and managing", "to serve as a decorator factory. Note: the class implementation of decorators behaves", "self.make_node(Multiplication(), self, value) return node def __rmul__(self, value): node = self.make_node(Multiplication(), value, self)", "np.multiply(left, grad_value) return (left_out, right_out) class Division(Node): def __init__(self): super().__init__() self.type = 'Division'", "of type of node self.type = 'None' # Reverse mode self._grad_value = 0", "coef = np.multiply(np.log(temp_base), exp_prime) powered = np.power(base, exp) term2 = np.multiply(coef, powered) return", "attributes. \"\"\" def value(self): return self._value def derivative(self): return self._derivative def set_value(self, value):", "var \"\"\" REVERSE MODE Helper functions for properly doing the reverse mode of", "self) return node def __mul__(self, value): node = self.make_node(Multiplication(), self, value) return node", "def iterate_seeds(self): \"\"\" Generator to iterate over all variables of this node, which", "= 1 coef = np.multiply(np.log(temp_base), exp_prime) powered = np.power(base, exp) term2 = np.multiply(coef,", "serve as a decorator factory. 
Note: the class implementation of decorators behaves very", "update_cur_var(self): for v in self._variables: if np.any(self._variables[v].derivative()): self._cur_var = v return self._variables[v] def", "self, value) return node def __rtruediv__(self, value): node = self.make_node(Division(), value, self) return", "Uncomment when overriding: # @node_decorate('reverse') def reverse(self, values, grad_value): raise NotImplementedError def get_comp_graph(self):", "@node_decorate('evaluate') def eval(self, values): return -1*np.array(values[0]) @node_decorate('differentiate') def diff(self, values, diffs): return -1*np.array(diffs[0])", "isinstance(value, np.ndarray) and key in self._variables: self._derivative[key] = np.zeros(value.size) for node in self.children:", "set, needs to set the derivative def set_value(self, value): self._value = None if", "for updating node derivatives. \"\"\" @wraps(fn) def wrapper(self): values = [child.eval() for child", "\"\"\" def __init__(self): self._value = None self._derivative = {} self._variables = {} self._cur_var", "reverse(self, values, grad_value): raise NotImplementedError def get_comp_graph(self): \"\"\" Creates a computational graph for", "need to have done first sweep before reverse, assume values exist values =", "i self._derivative[i] = 1 yield i # # Override calling the variable def", "idx in self._variables[var].iterate_idxs(): yield idx else: self._variables[var].set_derivative(1) yield var \"\"\" REVERSE MODE Helper", "value, self) return node def __neg__(self): node = self.make_node(Negation(), self) return node def", "return self._derivative def set_value(self, value): if not isinstance(value, (numbers.Number, np.ndarray)): raise TypeError('Value must", "given no value.' 
% self.name) return self.value() def diff(self): if self.derivative() is None:", "be numeric or a numpy array.') self._value = value def set_derivative(self, value): var", "results self._derivative = {} for key, var in self._variables.items(): self._derivative[key] = var._grad_value return", "value(self): return self._value def derivative(self): return self._derivative def set_value(self, value): if not isinstance(value,", "mode): # Maintain function metadata (doctstrings, etc.) with wraps self.factory = {'evaluate': self.eval_wrapper,", "'Power' @node_decorate('evaluate') def eval(self, values): base, exp = values return np.power(base, exp) @node_decorate('differentiate')", "np.multiply(values[0], diffs[1]) denom = np.array(values[1])**2 if denom == 0: raise ZeroDivisionError('Division by zero.')", "node.zero_vector_derivative(input_dict) def update_cur_var(self): for v in self._variables: if np.any(self._variables[v].derivative()): self._cur_var = v return", "\"\"\" self._grad_count += 1 for child in self.children: child.set_grad_count() def ready_to_reverse(self): return (self._cur_grad_count", "of addition contribution self._cur_grad_count += 1 self._grad_value += value \"\"\" COMPUTATION Actual computation", "grad_value) return (left_out, right_out) class Division(Node): def __init__(self): super().__init__() self.type = 'Division' @node_decorate('evaluate')", "values): base, exp = values return np.power(base, exp) @node_decorate('differentiate') def diff(self, values, diffs):", "subclasses that define operations or single values, such as variables and constants. 
\"\"\"", "self.derivative() # Override dict functionality for variables; I could keep this # consistent,", "new_nodes = [] for value in values: new = value if not isinstance(new,", "(numbers.Number, np.ndarray)): raise TypeError('Value must be numeric or a numpy array.') self._value =", "input_dict = kwargs elif len(args) == 1: input_dict = args[0] if self.name not", "import numbers from .visualization import create_computational_graph, create_computational_table from .settings import settings \"\"\" Custom", "= list(set(new_vars)) variable_names = [var.name for var in variables] self._variables = dict(zip(variable_names, variables))", "**kwargs): if len(args) == 0: input_dict = kwargs elif len(args) == 1: input_dict", "for a given node. \"\"\" return create_computational_table(self) \"\"\" SUBCLASSES Node subclasses that define", "= 'Division' @node_decorate('evaluate') def eval(self, values): if values[1] == 0: raise ZeroDivisionError('Division by", "self.value() def diff(self): if self.derivative() is None: raise NoValueError('Variable %s has been given", "the package, from calling nodes directly to compute to treating them as one", "fn): return self.wrapper(fn) def eval_wrapper(self, fn): \"\"\" Wrapper for updating node values. \"\"\"", "once at decoration, since we have another function layer outside now). \"\"\" def", "!= 0: # Compute only if necessary, otherwise we run into log(-c) issues", "its contributions to its children, and managing these contributions. \"\"\" def zero_grad_values(self): \"\"\"", "0: input_dict = kwargs elif len(args) == 1: input_dict = args[0] if self.name", "with 0 derivative. \"\"\" def __init__(self, value): super().__init__() self.set_value(value) self.set_derivative(0) self.type = 'Constant'", "def __init__(self, mode): # Maintain function metadata (doctstrings, etc.) 
with wraps self.factory =", "child.set_grad_count() def ready_to_reverse(self): return (self._cur_grad_count == self._grad_count) def add_grad_contribution(self, value): # Keep track", "zero_grad_values(self): \"\"\" Reset all partial contributions for reverse pass \"\"\" self._grad_value = 0", "On value set, needs to set the derivative def set_value(self, value): self._value =", "self.children = [] # Name of type of node self.type = 'None' #", "@node_decorate('differentiate') def diff(self, values, diffs): return np.multiply(diffs[0], values[1]) + np.multiply(diffs[1], values[0]) # Reverse", "self.value() is None: raise NoValueError('Variable %s has been given no value.' % self.name)", "ZeroDivisionError('Division by zero.') return np.divide(values[0], values[1]) @node_decorate('differentiate') def diff(self, values, diffs): num =", "results as children) for idx in range(len(results)): self.children[idx].add_grad_contribution(results[idx]) self.children[idx].reverse() return results return wrapper", "Methods for setting and getting attributes. 
\"\"\" def value(self): return self._value def derivative(self):", "wraps import numpy as np import numbers from .visualization import create_computational_graph, create_computational_table from", "raise ZeroDivisionError('Division by zero.') return np.divide(values[0], values[1]) @node_decorate('differentiate') def diff(self, values, diffs): num", "\"\"\" for key, value in input_dict.items(): self._variables[key].set_value(value) # if isinstance(value, np.ndarray): # self._derivative[key]", "diff(self, values, diffs): base, exp = values b_prime, exp_prime = diffs # First", "grad_value): return (grad_value, -grad_value) class Multiplication(Node): def __init__(self): super().__init__() self.type = 'Multiplication' @node_decorate('evaluate')", "self.eval_wrapper, 'differentiate': self.diff_wrapper, 'reverse': self.reverse_wrapper} self.wrapper = self.factory[mode] def __call__(self, fn): return self.wrapper(fn)", "variables for evaluation. \"\"\" for key, value in input_dict.items(): self._variables[key].set_value(value) # if isinstance(value,", "= self.make_node(Negation(), self) return node def __sub__(self, value): node = self.make_node(Subtraction(), self, value)", "self.reverse() # Now set the results self._derivative = {} for key, var in", "return self.compute(*args, **kwargs) def __repr__(self): output = 'Node(Function = %r, Value = %r,", "node = self.make_node(Multiplication(), self, value) return node def __rmul__(self, value): node = self.make_node(Multiplication(),", "return (self._cur_grad_count == self._grad_count) def add_grad_contribution(self, value): # Keep track of addition contribution", "has been given no value.' 
% self.name) return self.derivative() # Override dict functionality", "self.make_node(Division(), value, self) return node def __pow__(self, value): node = self.make_node(Power(), self, value)", "self.wrapper = self.factory[mode] def __call__(self, fn): return self.wrapper(fn) def eval_wrapper(self, fn): \"\"\" Wrapper", "__call__(self, fn): return self.wrapper(fn) def eval_wrapper(self, fn): \"\"\" Wrapper for updating node values.", "numpy array.') self._value = value def set_derivative(self, value): var = self.update_cur_var() if isinstance(value,", "exp_prime != 0: # Compute only if necessary, otherwise we run into log(-c)", "= value else: self._derivative = value # On value set, needs to set", "fn): \"\"\" Wrapper for updating gradients in reverse pass. \"\"\" @wraps(fn) def wrapper(self):", "not recognized.') # Compute the value at this node self.set_variables(input_dict) self.eval() # Compute", "@node_decorate('evaluate') def eval(self, values): left, right = values return np.add(left, right) @node_decorate('differentiate') def", "for child in self.children] result = fn(self, values, diffs) self.set_derivative(result) return result return", "and then propagates values through the graph to the final output values. \"\"\"", "and key in self._variables: self._derivative[key] = np.zeros(value.size) for node in self.children: node.zero_vector_derivative(input_dict) def", "1 for child in self.children: child.set_grad_count() def ready_to_reverse(self): return (self._cur_grad_count == self._grad_count) def", "decorator factory. Note: the class implementation of decorators behaves very differently in the", "done first sweep before reverse, assume values exist values = [child.value() for child", "variables necessary in children. 
\"\"\" new_vars = [] for child in self.children: if", "= [] for child in self.children: if isinstance(child, Variable): new_vars.append(child) else: new_vars.extend(child._variables.values()) variables", "compute its contributions to its children, and managing these contributions. \"\"\" def zero_grad_values(self):", "self._grad_count += 1 for child in self.children: child.set_grad_count() def ready_to_reverse(self): return (self._cur_grad_count ==", "to have done first sweep before reverse, assume values exist values = [child.value()", "updating node derivatives. \"\"\" @wraps(fn) def wrapper(self): values = [child.eval() for child in", "counts self.set_grad_count() # Seeding output, current node by 1 self.add_grad_contribution(1) self.reverse() # Now", "not isinstance(value, (numbers.Number, np.ndarray)): raise TypeError('Value must be numeric or a numpy array.')", "results = fn(self, values, grad_value) # Need to propagate results (functions need to", "def eval(self, values): return -1*np.array(values[0]) @node_decorate('differentiate') def diff(self, values, diffs): return -1*np.array(diffs[0]) #", "def add_grad_contribution(self, value): # Keep track of addition contribution self._cur_grad_count += 1 self._grad_value", "mode @node_decorate('reverse') def reverse(self, values, grad_value): left, right = values left_out = np.multiply(right,", "Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): left, right = values left_out =", "def __init__(self): super().__init__() self.type = 'Multiplication' @node_decorate('evaluate') def eval(self, values): return np.multiply(values[0], values[1])", "values): # values vector respects order return np.subtract(values[0], values[1]) @node_decorate('differentiate') def diff(self, values,", "\"\"\" Creates a computational table for a given node. 
\"\"\" return create_computational_table(self) \"\"\"", "to return same # of results as children) for idx in range(len(results)): self.children[idx].add_grad_contribution(results[idx])", "variables of this node, which assign seed values to variables to compute all", "Reverse mode self._grad_value = 0 self._cur_grad_count = 0 self._grad_count = 0 @classmethod def", "self._value = None self._derivative = {} self._variables = {} self._cur_var = None self.children", "def zero_grad_values(self): \"\"\" Reset all partial contributions for reverse pass \"\"\" self._grad_value =", "create_computational_graph, create_computational_table from .settings import settings \"\"\" Custom exceptions. \"\"\" class NoValueError(Exception): pass", "# of results as children) for idx in range(len(results)): self.children[idx].add_grad_contribution(results[idx]) self.children[idx].reverse() return results", "base_out = np.multiply(np.multiply(exp, np.power(base, exp-1)), grad_value) exp_out = np.multiply(np.multiply(np.log(base), np.power(base, exp)), grad_value) return", "node self.type = 'None' # Reverse mode self._grad_value = 0 self._cur_grad_count = 0", "# Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): return (-1*np.array(grad_value),) class Subtraction(Node): def", "return -1*np.array(diffs[0]) # Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): return (-1*np.array(grad_value),) class", "symbolic computation. \"\"\" def __call__(self, *args, **kwargs): return self.compute(*args, **kwargs) def __repr__(self): output", "input_dict: raise TypeError('Input not recognized.') self.set_value(input_dict[self.name]) self.set_derivative(1); return self def __call__(self, *args, **kwargs):", "left, right = values left_out = np.multiply(right, grad_value) right_out = np.multiply(left, grad_value) return", "wrapper def diff_wrapper(self, fn): \"\"\" Wrapper for updating node derivatives. 
\"\"\" @wraps(fn) def", "import create_computational_graph, create_computational_table from .settings import settings \"\"\" Custom exceptions. \"\"\" class NoValueError(Exception):", "if input_dict.keys() != self._variables.keys(): raise TypeError('Input not recognized.') # Compute the value at", "diff(self): return self.derivative() # Reverse mode doesn't need to do anything, no children", "np.add(left, right) @node_decorate('differentiate') def diff(self, values, diffs): left, right = diffs return np.add(left,", "elif len(args) == 1: input_dict = args[0] if input_dict.keys() != self._variables.keys(): raise TypeError('Input", "Node representing a symbolic variable. Serves as the basis of evaluation, and then", "self._grad_count = 0 @classmethod def make_constant(cls, value): return Constant(value) @classmethod def make_node(cls, node,", "in self.children] result = fn(self, values, diffs) self.set_derivative(result) return result return wrapper def", "self.set_derivative(0) self.var_idx = i self._derivative[i] = 1 yield i # # Override calling", "self._variables: # Reset derivatives for v in self._variables: self._variables[v].set_derivative(0) if isinstance(self._variables[var].value(), np.ndarray): for", "1: input_dict = args[0] if self.name not in input_dict: raise TypeError('Input not recognized.')", "value): super().__init__() self.set_value(value) self.set_derivative(0) self.type = 'Constant' def set_derivative(self, value): self._derivative = value", "Creates a computational graph for a given node. \"\"\" return create_computational_graph(self) def get_comp_table(self):", "Implemented as a class for clarity and to serve as a decorator factory.", "not any node is ready to compute its contributions to its children, and", "\"\"\" @wraps(fn) def wrapper(self): values = [child.eval() for child in self.children] result =", "in symbolic computation. 
\"\"\" def __call__(self, *args, **kwargs): return self.compute(*args, **kwargs) def __repr__(self):", "propagate results (functions need to return same # of results as children) for", "to variables to compute all partials. \"\"\" for var in self._variables: # Reset", "NoValueError('Variable %s has been given no value.' % self.name) return self.derivative() # Override", "def eval(self, values): return np.multiply(values[0], values[1]) @node_decorate('differentiate') def diff(self, values, diffs): return np.multiply(diffs[0],", "return create_computational_table(self) \"\"\" SUBCLASSES Node subclasses that define operations or single values, such", "def derivative(self): return self._derivative def set_value(self, value): if not isinstance(value, (numbers.Number, np.ndarray)): raise", "Node(): \"\"\" Class Node Base Node implementation. \"\"\" def __init__(self): self._value = None", "self.make_node(Power(), self, value) return node def __rpow__(self, value): node = self.make_node(Power(), value, self)", "Reverse mode doesn't need to do anything, no children @node_decorate('reverse') def reverse(self, values,", "is None: raise NoValueError('Variable %s has been given no value.' % self.name) return", "= 'Constant' def set_derivative(self, value): self._derivative = value def eval(self): return self.value() def", "node. \"\"\" return create_computational_graph(self) def get_comp_table(self): \"\"\" Creates a computational table for a", "class Addition(Node): def __init__(self): super().__init__() self.type = 'Addition' @node_decorate('evaluate') def eval(self, values): left,", "diffs): return -1*np.array(diffs[0]) # Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): return (-1*np.array(grad_value),)", "representing a symbolic variable. 
Serves as the basis of evaluation, and then propagates", "left, right = diffs return np.add(left, right) # Reverse mode @node_decorate('reverse') def reverse(self,", "0 # if exp_prime != 0: # Compute only if necessary, otherwise we", "node def __rpow__(self, value): node = self.make_node(Power(), value, self) return node def __eq__(self,other):", "\"\"\" def zero_grad_values(self): \"\"\" Reset all partial contributions for reverse pass \"\"\" self._grad_value", "def diff(self, values, diffs): num = np.multiply(diffs[0], values[1]) - np.multiply(values[0], diffs[1]) denom =", "@node_decorate('reverse') def reverse(self, values, grad_value): return () class Constant(Node): \"\"\" Node representing a", "Logic for Automatic Differentiation \"\"\" from functools import wraps import numpy as np", "(doctstrings, etc.) with wraps self.factory = {'evaluate': self.eval_wrapper, 'differentiate': self.diff_wrapper, 'reverse': self.reverse_wrapper} self.wrapper", "need to return same # of results as children) for idx in range(len(results)):", "range(self._value.size): self.set_derivative(0) self.var_idx = i self._derivative[i] = 1 yield i # # Override", "new_vars.extend(child._variables.values()) variables = list(set(new_vars)) variable_names = [var.name for var in variables] self._variables =", "= np.copy(base) temp_base[temp_base<=0] = 1 coef = np.multiply(np.log(temp_base), exp_prime) powered = np.power(base, exp)", "overriding: # @node_decorate('differentiate') def diff(self, values, diffs): raise NotImplementedError # Uncomment when overriding:", "self.factory = {'evaluate': self.eval_wrapper, 'differentiate': self.diff_wrapper, 'reverse': self.reverse_wrapper} self.wrapper = self.factory[mode] def __call__(self,", "if self.value() is None: raise NoValueError('Variable %s has been given no value.' %", "self._variables = dict(zip(variable_names, variables)) def set_variables(self, input_dict): \"\"\" Set variables for evaluation. 
\"\"\"", "def __init__(self): super().__init__() self.type = 'Division' @node_decorate('evaluate') def eval(self, values): if values[1] ==", "super().__init__() self.set_value(value) self.set_derivative(0) self.type = 'Constant' def set_derivative(self, value): self._derivative = value def", "node.update_variables() return node \"\"\" MAGIC Various implementations to improve the interface of the", "eval(self): return self.value() def diff(self): return self.derivative() # Reverse mode doesn't need to", "self._derivative[:] = value else: self._derivative = value # On value set, needs to", "% self.name) return self.value() def diff(self): if self.derivative() is None: raise NoValueError('Variable %s", "__rmul__(self, value): node = self.make_node(Multiplication(), value, self) return node def __truediv__(self, value): node", "denom) denom_out = -1*np.divide(np.multiply(grad_value,numer), np.power(denom, 2)) return (numer_out, denom_out) class Power(Node): def __init__(self):", "reverse mode of automatic differentiation. These include keeping track of whether or not", "!= Variable: for key, value in input_dict.items(): if isinstance(value, np.ndarray) and key in", "ValueError('Name must be given for variable.') self.name = name self.type = 'Variable' self._variables[name]", "necessary in children. 
\"\"\" new_vars = [] for child in self.children: if isinstance(child,", "as np import numbers from .visualization import create_computational_graph, create_computational_table from .settings import settings", "np.multiply(values[0], values[1]) @node_decorate('differentiate') def diff(self, values, diffs): return np.multiply(diffs[0], values[1]) + np.multiply(diffs[1], values[0])", "self, value) return node def __rmul__(self, value): node = self.make_node(Multiplication(), value, self) return", "= name self.type = 'Variable' self._variables[name] = self self.var_idx = -1 def eval(self):", "child in self.children] grad_value = self._grad_value results = fn(self, values, grad_value) # Need", "value at this node self.set_variables(input_dict) self.eval() # Compute derivatives based on mode if", "not self == other def __hash__(self): return id(self) \"\"\" ATTRIBUTES Methods for setting", "def set_value(self, value): if not isinstance(value, (numbers.Number, np.ndarray)): raise TypeError('Value must be numeric", "return wrapper def reverse_wrapper(self, fn): \"\"\" Wrapper for updating gradients in reverse pass.", "isinstance(self._variables[var].value(), np.ndarray): for idx in self._variables[var].iterate_idxs(): yield idx else: self._variables[var].set_derivative(1) yield var \"\"\"", "not in self._derivative: # self._derivative[self._cur_var] = np.zeros(value.size) self._derivative[self._cur_var][var.var_idx] = value[var.var_idx] def set_children(self, *children):", "= 'Variable' self._variables[name] = self self.var_idx = -1 def eval(self): if self.value() is", "self.set_derivative(1); return self def __call__(self, *args, **kwargs): return self.compute(*args, **kwargs) # Reverse mode", "contribution counts self.set_grad_count() # Seeding output, current node by 1 self.add_grad_contribution(1) self.reverse() #", "b_prime) powered = np.power(base, np.subtract(exp, 1)) term1 = np.multiply(coef, powered) # Second term", "= kwargs elif len(args) == 1: input_dict = 
args[0] if self.name not in", "class Node(): \"\"\" Class Node Base Node implementation. \"\"\" def __init__(self): self._value =", "key, value in input_dict.items(): if isinstance(value, np.ndarray) and key in self._variables: self._derivative[key] =", "define operations or single values, such as variables and constants. \"\"\" class Variable(Node):", "derivatives. \"\"\" @wraps(fn) def wrapper(self): values = [child.eval() for child in self.children] diffs", "self._derivative def set_value(self, value): if not isinstance(value, (numbers.Number, np.ndarray)): raise TypeError('Value must be", "else: self._variables[var].set_derivative(1) yield var \"\"\" REVERSE MODE Helper functions for properly doing the", "if isinstance(child, Variable): new_vars.append(child) else: new_vars.extend(child._variables.values()) variables = list(set(new_vars)) variable_names = [var.name for", "wrapper def reverse_wrapper(self, fn): \"\"\" Wrapper for updating gradients in reverse pass. \"\"\"", "self._value = value def set_derivative(self, value): var = self.update_cur_var() if isinstance(value, numbers.Number): self._derivative[self._cur_var]", "computational graph for a given node. \"\"\" return create_computational_graph(self) def get_comp_table(self): \"\"\" Creates", "to compute all partials. \"\"\" for var in self._variables: # Reset derivatives for", "but would increase computation; elegance tradeoff def set_derivative(self, value): if isinstance(self.value(), np.ndarray): self._derivative[:]", "# Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): return (grad_value, grad_value) class Negation(Node):", "return node def __neg__(self): node = self.make_node(Negation(), self) return node def __sub__(self, value):", "value, self) return node def __truediv__(self, value): node = self.make_node(Division(), self, value) return", "in children \"\"\" if type(self) != Variable: for key, value in input_dict.items(): if", "and to serve as a decorator factory. 
Note: the class implementation of decorators", "input_dict = args[0] if self.name not in input_dict: raise TypeError('Input not recognized.') self.set_value(input_dict[self.name])", "in variables] self._variables = dict(zip(variable_names, variables)) def set_variables(self, input_dict): \"\"\" Set variables for", "return np.add(left, right) # Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): return (grad_value,", "when overriding: # @node_decorate('evaluate') def eval(self, values): raise NotImplementedError # Uncomment when overriding:", "have done first sweep before reverse, assume values exist values = [child.value() for", "be given for variable.') self.name = name self.type = 'Variable' self._variables[name] = self", "\"\"\" COMPUTATION Actual computation functions, with eval and diff to be implemented by", "all partials. \"\"\" for var in self._variables: # Reset derivatives for v in", "node self.set_variables(input_dict) self.eval() # Compute derivatives based on mode if settings.current_mode() == \"forward\":", "== 1: input_dict = args[0] if self.name not in input_dict: raise TypeError('Input not", "value): if isinstance(self.value(), np.ndarray): self._derivative[:] = value else: self._derivative = value # On", "Update current variable list to reflect all variables necessary in children. 
\"\"\" new_vars", "def reverse(self, values, grad_value): return (grad_value, grad_value) class Negation(Node): def __init__(self): super().__init__() self.type", "each vector position def iterate_idxs(self): for i in range(self._value.size): self.set_derivative(0) self.var_idx = i", "package, from calling nodes directly to compute to treating them as one would", "1 yield i # # Override calling the variable def compute(self, *args, **kwargs):", "[] # Name of type of node self.type = 'None' # Reverse mode", "grad_value): return () class Addition(Node): def __init__(self): super().__init__() self.type = 'Addition' @node_decorate('evaluate') def", "self.ready_to_reverse(): return # We need to have done first sweep before reverse, assume", "very differently in the case the decorator pattern takes arguments (__call__ is called", "reverse(self, values, grad_value): return () class Addition(Node): def __init__(self): super().__init__() self.type = 'Addition'", "super().__init__() self.type = 'Division' @node_decorate('evaluate') def eval(self, values): if values[1] == 0: raise", "reverse(self, values, grad_value): return () class Constant(Node): \"\"\" Node representing a constant. 
Always", "import wraps import numpy as np import numbers from .visualization import create_computational_graph, create_computational_table", "self == other def __hash__(self): return id(self) \"\"\" ATTRIBUTES Methods for setting and", "@node_decorate('evaluate') def eval(self, values): raise NotImplementedError # Uncomment when overriding: # @node_decorate('differentiate') def", "mode doesn't need to do anything, no children @node_decorate('reverse') def reverse(self, values, grad_value):", "# Get proper contribution counts self.set_grad_count() # Seeding output, current node by 1", "\"\"\" MAGIC Various implementations to improve the interface of the package, from calling", "for child in self.children] diffs = [child.diff() for child in self.children] result =", "for Automatic Differentiation \"\"\" from functools import wraps import numpy as np import", "# Reverse mode doesn't need to do anything, no children @node_decorate('reverse') def reverse(self,", "Negation(Node): def __init__(self): super().__init__() self.type = 'Negation' @node_decorate('evaluate') def eval(self, values): return -1*np.array(values[0])", "create_computational_table from .settings import settings \"\"\" Custom exceptions. \"\"\" class NoValueError(Exception): pass class", "node \"\"\" MAGIC Various implementations to improve the interface of the package, from", "return np.power(base, exp) @node_decorate('differentiate') def diff(self, values, diffs): base, exp = values b_prime,", "graph to the final output values. 
\"\"\" def __init__(self, name=None): super().__init__() if name", "== 0: raise ZeroDivisionError('Division by zero.') return np.divide(values[0], values[1]) @node_decorate('differentiate') def diff(self, values,", "self.make_node(Multiplication(), value, self) return node def __truediv__(self, value): node = self.make_node(Division(), self, value)", "if not isinstance(value, (numbers.Number, np.ndarray)): raise TypeError('Value must be numeric or a numpy", "(grad_value, -grad_value) class Multiplication(Node): def __init__(self): super().__init__() self.type = 'Multiplication' @node_decorate('evaluate') def eval(self,", "Division(Node): def __init__(self): super().__init__() self.type = 'Division' @node_decorate('evaluate') def eval(self, values): if values[1]", "Node Logic for Automatic Differentiation \"\"\" from functools import wraps import numpy as", "these contributions. \"\"\" def zero_grad_values(self): \"\"\" Reset all partial contributions for reverse pass", "if self.derivative() is None: raise NoValueError('Variable %s has been given no value.' %", "values, diffs): return np.subtract(diffs[0], diffs[1]) # Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value):", "= 'Power' @node_decorate('evaluate') def eval(self, values): base, exp = values return np.power(base, exp)", "self.type = 'Variable' self._variables[name] = self self.var_idx = -1 def eval(self): if self.value()", "var = self.update_cur_var() if isinstance(value, numbers.Number): self._derivative[self._cur_var] = value else: # if self._cur_var", "in self.children: child.zero_grad_values() def set_grad_count(self): \"\"\" Calculate dependency counts \"\"\" self._grad_count += 1", "# consistent, but would increase computation; elegance tradeoff def set_derivative(self, value): if isinstance(self.value(),", "by zero.') return np.divide(num, denom) # Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value):", "the graph to the final output values. 
\"\"\" def __init__(self, name=None): super().__init__() if", "consistent, but would increase computation; elegance tradeoff def set_derivative(self, value): if isinstance(self.value(), np.ndarray):", "values, grad_value): return (-1*np.array(grad_value),) class Subtraction(Node): def __init__(self): super().__init__() self.type = 'Subtraction' @node_decorate('evaluate')", "values): if values[1] == 0: raise ZeroDivisionError('Division by zero.') return np.divide(values[0], values[1]) @node_decorate('differentiate')", "self.value(), self.derivative()) return output def __add__(self, value): node = self.make_node(Addition(), self, value) return", "# Reverse mode @node_decorate('reverse') def reverse(self, values, grad_value): left, right = values left_out", "self.children: child.zero_grad_values() def set_grad_count(self): \"\"\" Calculate dependency counts \"\"\" self._grad_count += 1 for", "Second term term2 = 0 # if exp_prime != 0: # Compute only", "term1 = np.multiply(coef, powered) # Second term term2 = 0 # if exp_prime", "new = cls.make_constant(value) new_nodes.append(new) node.set_children(*new_nodes) node.update_variables() return node \"\"\" MAGIC Various implementations to", "set_variables(self, input_dict): \"\"\" Set variables for evaluation. \"\"\" for key, value in input_dict.items():", "values = [child.eval() for child in self.children] result = fn(self, values) self.set_value(result) return", "Maintain function metadata (doctstrings, etc.) with wraps self.factory = {'evaluate': self.eval_wrapper, 'differentiate': self.diff_wrapper,", "self) return node def __truediv__(self, value): node = self.make_node(Division(), self, value) return node", "np.ndarray): self.set_derivative(np.zeros(value.size)) super().set_value(value) # Iterate over each vector position def iterate_idxs(self): for i", "to its children, and managing these contributions. 
\"\"\" def zero_grad_values(self): \"\"\" Reset all", "__ne__(self, other): return not self == other def __hash__(self): return id(self) \"\"\" ATTRIBUTES", "None if isinstance(value, np.ndarray): self.set_derivative(np.zeros(value.size)) super().set_value(value) # Iterate over each vector position def", "representing a constant. Always initiated with 0 derivative. \"\"\" def __init__(self, value): super().__init__()", "= {} self._variables = {} self._cur_var = None self.children = [] # Name", "tradeoff def set_derivative(self, value): if isinstance(self.value(), np.ndarray): self._derivative[:] = value else: self._derivative =", "np.any(self._variables[v].derivative()): self._cur_var = v return self._variables[v] def iterate_seeds(self): \"\"\" Generator to iterate over", "output def __add__(self, value): node = self.make_node(Addition(), self, value) return node def __radd__(self,", "track of addition contribution self._cur_grad_count += 1 self._grad_value += value \"\"\" COMPUTATION Actual", "must be given for variable.') self.name = name self.type = 'Variable' self._variables[name] =", "= 'None' # Reverse mode self._grad_value = 0 self._cur_grad_count = 0 self._grad_count =" ]
[ "y * h + exp_box[1] cv2.circle(img, (int(x), int(y)), 1, (255, 255, 0), 1)", "pt in landmark.reshape((config.LANDMARK_SIZE, 2)): x, y = pt x = x * w", "= int(y + exp_box[1]) cv2.circle(img, (int(x), int(y)), 1, (255, 0, 0), 1) h,", "exp_box[0]:exp_box[2]] baidu_result = call_baidu_api(cropped, '') baidu_lm = extract_baidu_lm72(baidu_result[0][-1]) for x, y in baidu_lm:", "- 127.5) / 127.5 net.blobs['data'].data[0] = cropped out = net.forward() landmark = out['Dense2'][0]", "box in boxes: if not is_valid_facebox(box): continue exp_box = expand_mtcnn_box(img, box) cropped =", "not is_valid_facebox(box): continue exp_box = expand_mtcnn_box(img, box) cropped = img[exp_box[1]:exp_box[3], exp_box[0]:exp_box[2]] baidu_result =", "extract_baidu_lm72 from baidu import call_baidu_api def create_net(model_dir, iter_num): model_path = os.path.join(model_dir, 'landmark_iter_%d.caffemodel' %", "img_path = sys.argv[2] model_dir = config.MODEL_DIR if len(sys.argv) > 3: model_dir = sys.argv[3]", "for box in boxes: if not is_valid_facebox(box): continue exp_box = expand_mtcnn_box(img, box) cropped", "for x, y in baidu_lm: x = int(x + exp_box[0]) y = int(y", "from gen_landmark import expand_mtcnn_box, is_valid_facebox, extract_baidu_lm72 from baidu import call_baidu_api def create_net(model_dir, iter_num):", "= create_net(model_dir, iter_num) mtcnn = fast_mtcnn() boxes = mtcnn(img_path) for box in boxes:", "out = net.forward() landmark = out['Dense2'][0] for pt in landmark.reshape((config.LANDMARK_SIZE, 2)): x, y", "= (cropped - 127.5) / 127.5 net.blobs['data'].data[0] = cropped out = net.forward() landmark", "w, _ = cropped.shape cropped = cv2.resize(cropped, (config.IMG_SIZE, config.IMG_SIZE)) cropped = np.swapaxes(cropped, 0,", "= x * w + exp_box[0] y = y * h + exp_box[1]", "= extract_baidu_lm72(baidu_result[0][-1]) for x, y in baidu_lm: x = int(x + exp_box[0]) y", "= int(x + exp_box[0]) y = int(y + exp_box[1]) cv2.circle(img, (int(x), int(y)), 1,", "model_path, caffe.TEST) if 
__name__ == '__main__': iter_num = int(sys.argv[1]) img_path = sys.argv[2] model_dir", "for pt in landmark.reshape((config.LANDMARK_SIZE, 2)): x, y = pt x = x *", "config sys.path.append('../') from fast_mtcnn import fast_mtcnn from gen_landmark import expand_mtcnn_box, is_valid_facebox, extract_baidu_lm72 from", "proto_path = 'landmark.prototxt' return caffe.Net(proto_path, model_path, caffe.TEST) if __name__ == '__main__': iter_num =", "fast_mtcnn() boxes = mtcnn(img_path) for box in boxes: if not is_valid_facebox(box): continue exp_box", "int(y)), 1, (255, 0, 0), 1) h, w, _ = cropped.shape cropped =", "== '__main__': iter_num = int(sys.argv[1]) img_path = sys.argv[2] model_dir = config.MODEL_DIR if len(sys.argv)", "= 'landmark.prototxt' return caffe.Net(proto_path, model_path, caffe.TEST) if __name__ == '__main__': iter_num = int(sys.argv[1])", "= int(sys.argv[1]) img_path = sys.argv[2] model_dir = config.MODEL_DIR if len(sys.argv) > 3: model_dir", "create_net(model_dir, iter_num): model_path = os.path.join(model_dir, 'landmark_iter_%d.caffemodel' % iter_num) proto_path = 'landmark.prototxt' return caffe.Net(proto_path,", "= sys.argv[3] img = cv2.imread(img_path) net = create_net(model_dir, iter_num) mtcnn = fast_mtcnn() boxes", "= pt x = x * w + exp_box[0] y = y *", "model_dir = config.MODEL_DIR if len(sys.argv) > 3: model_dir = sys.argv[3] img = cv2.imread(img_path)", "sys.path.append('../') from fast_mtcnn import fast_mtcnn from gen_landmark import expand_mtcnn_box, is_valid_facebox, extract_baidu_lm72 from baidu", "'landmark.prototxt' return caffe.Net(proto_path, model_path, caffe.TEST) if __name__ == '__main__': iter_num = int(sys.argv[1]) img_path", "= cv2.imread(img_path) net = create_net(model_dir, iter_num) mtcnn = fast_mtcnn() boxes = mtcnn(img_path) for", "landmark.reshape((config.LANDMARK_SIZE, 2)): x, y = pt x = x * w + exp_box[0]", "% iter_num) proto_path = 'landmark.prototxt' return caffe.Net(proto_path, model_path, caffe.TEST) if __name__ == 
'__main__':", "= out['Dense2'][0] for pt in landmark.reshape((config.LANDMARK_SIZE, 2)): x, y = pt x =", "expand_mtcnn_box(img, box) cropped = img[exp_box[1]:exp_box[3], exp_box[0]:exp_box[2]] baidu_result = call_baidu_api(cropped, '') baidu_lm = extract_baidu_lm72(baidu_result[0][-1])", "'') baidu_lm = extract_baidu_lm72(baidu_result[0][-1]) for x, y in baidu_lm: x = int(x +", "= cv2.resize(cropped, (config.IMG_SIZE, config.IMG_SIZE)) cropped = np.swapaxes(cropped, 0, 2) cropped = (cropped -", "baidu_lm = extract_baidu_lm72(baidu_result[0][-1]) for x, y in baidu_lm: x = int(x + exp_box[0])", "import cv2 import time import caffe import numpy as np import config sys.path.append('../')", "sys.argv[2] model_dir = config.MODEL_DIR if len(sys.argv) > 3: model_dir = sys.argv[3] img =", "cv2.circle(img, (int(x), int(y)), 1, (255, 0, 0), 1) h, w, _ = cropped.shape", "baidu import call_baidu_api def create_net(model_dir, iter_num): model_path = os.path.join(model_dir, 'landmark_iter_%d.caffemodel' % iter_num) proto_path", "boxes = mtcnn(img_path) for box in boxes: if not is_valid_facebox(box): continue exp_box =", "caffe.Net(proto_path, model_path, caffe.TEST) if __name__ == '__main__': iter_num = int(sys.argv[1]) img_path = sys.argv[2]", "= expand_mtcnn_box(img, box) cropped = img[exp_box[1]:exp_box[3], exp_box[0]:exp_box[2]] baidu_result = call_baidu_api(cropped, '') baidu_lm =", "127.5 net.blobs['data'].data[0] = cropped out = net.forward() landmark = out['Dense2'][0] for pt in", "y = pt x = x * w + exp_box[0] y = y", "import expand_mtcnn_box, is_valid_facebox, extract_baidu_lm72 from baidu import call_baidu_api def create_net(model_dir, iter_num): model_path =", "net.forward() landmark = out['Dense2'][0] for pt in landmark.reshape((config.LANDMARK_SIZE, 2)): x, y = pt", "'landmark_iter_%d.caffemodel' % iter_num) proto_path = 'landmark.prototxt' return caffe.Net(proto_path, model_path, caffe.TEST) if __name__ ==", "import sys import cv2 import time import caffe import 
numpy as np import", "= y * h + exp_box[1] cv2.circle(img, (int(x), int(y)), 1, (255, 255, 0),", "cv2 import time import caffe import numpy as np import config sys.path.append('../') from", "model_dir = sys.argv[3] img = cv2.imread(img_path) net = create_net(model_dir, iter_num) mtcnn = fast_mtcnn()", "cropped.shape cropped = cv2.resize(cropped, (config.IMG_SIZE, config.IMG_SIZE)) cropped = np.swapaxes(cropped, 0, 2) cropped =", "from baidu import call_baidu_api def create_net(model_dir, iter_num): model_path = os.path.join(model_dir, 'landmark_iter_%d.caffemodel' % iter_num)", "+ exp_box[1]) cv2.circle(img, (int(x), int(y)), 1, (255, 0, 0), 1) h, w, _", "h + exp_box[1] cv2.circle(img, (int(x), int(y)), 1, (255, 255, 0), 1) time.sleep(0.5) cv2.imwrite('result.jpg',", "iter_num = int(sys.argv[1]) img_path = sys.argv[2] model_dir = config.MODEL_DIR if len(sys.argv) > 3:", "img[exp_box[1]:exp_box[3], exp_box[0]:exp_box[2]] baidu_result = call_baidu_api(cropped, '') baidu_lm = extract_baidu_lm72(baidu_result[0][-1]) for x, y in", "if not is_valid_facebox(box): continue exp_box = expand_mtcnn_box(img, box) cropped = img[exp_box[1]:exp_box[3], exp_box[0]:exp_box[2]] baidu_result", "out['Dense2'][0] for pt in landmark.reshape((config.LANDMARK_SIZE, 2)): x, y = pt x = x", "import fast_mtcnn from gen_landmark import expand_mtcnn_box, is_valid_facebox, extract_baidu_lm72 from baidu import call_baidu_api def", "'__main__': iter_num = int(sys.argv[1]) img_path = sys.argv[2] model_dir = config.MODEL_DIR if len(sys.argv) >", "__name__ == '__main__': iter_num = int(sys.argv[1]) img_path = sys.argv[2] model_dir = config.MODEL_DIR if", "mtcnn = fast_mtcnn() boxes = mtcnn(img_path) for box in boxes: if not is_valid_facebox(box):", "is_valid_facebox(box): continue exp_box = expand_mtcnn_box(img, box) cropped = img[exp_box[1]:exp_box[3], exp_box[0]:exp_box[2]] baidu_result = call_baidu_api(cropped,", "iter_num) mtcnn = fast_mtcnn() boxes = mtcnn(img_path) for box in boxes: if not", 
"sys import cv2 import time import caffe import numpy as np import config", "3: model_dir = sys.argv[3] img = cv2.imread(img_path) net = create_net(model_dir, iter_num) mtcnn =", "h, w, _ = cropped.shape cropped = cv2.resize(cropped, (config.IMG_SIZE, config.IMG_SIZE)) cropped = np.swapaxes(cropped,", "exp_box = expand_mtcnn_box(img, box) cropped = img[exp_box[1]:exp_box[3], exp_box[0]:exp_box[2]] baidu_result = call_baidu_api(cropped, '') baidu_lm", "from fast_mtcnn import fast_mtcnn from gen_landmark import expand_mtcnn_box, is_valid_facebox, extract_baidu_lm72 from baidu import", "caffe.TEST) if __name__ == '__main__': iter_num = int(sys.argv[1]) img_path = sys.argv[2] model_dir =", "pt x = x * w + exp_box[0] y = y * h", "cv2.resize(cropped, (config.IMG_SIZE, config.IMG_SIZE)) cropped = np.swapaxes(cropped, 0, 2) cropped = (cropped - 127.5)", "config.IMG_SIZE)) cropped = np.swapaxes(cropped, 0, 2) cropped = (cropped - 127.5) / 127.5", "import config sys.path.append('../') from fast_mtcnn import fast_mtcnn from gen_landmark import expand_mtcnn_box, is_valid_facebox, extract_baidu_lm72", "> 3: model_dir = sys.argv[3] img = cv2.imread(img_path) net = create_net(model_dir, iter_num) mtcnn", "x * w + exp_box[0] y = y * h + exp_box[1] cv2.circle(img,", "x = x * w + exp_box[0] y = y * h +", "* h + exp_box[1] cv2.circle(img, (int(x), int(y)), 1, (255, 255, 0), 1) time.sleep(0.5)", "continue exp_box = expand_mtcnn_box(img, box) cropped = img[exp_box[1]:exp_box[3], exp_box[0]:exp_box[2]] baidu_result = call_baidu_api(cropped, '')", "= sys.argv[2] model_dir = config.MODEL_DIR if len(sys.argv) > 3: model_dir = sys.argv[3] img", "= config.MODEL_DIR if len(sys.argv) > 3: model_dir = sys.argv[3] img = cv2.imread(img_path) net", "w + exp_box[0] y = y * h + exp_box[1] cv2.circle(img, (int(x), int(y)),", "cropped = img[exp_box[1]:exp_box[3], exp_box[0]:exp_box[2]] baidu_result = call_baidu_api(cropped, '') baidu_lm = extract_baidu_lm72(baidu_result[0][-1]) for x,", "img = 
cv2.imread(img_path) net = create_net(model_dir, iter_num) mtcnn = fast_mtcnn() boxes = mtcnn(img_path)", "call_baidu_api def create_net(model_dir, iter_num): model_path = os.path.join(model_dir, 'landmark_iter_%d.caffemodel' % iter_num) proto_path = 'landmark.prototxt'", "import numpy as np import config sys.path.append('../') from fast_mtcnn import fast_mtcnn from gen_landmark", "x, y = pt x = x * w + exp_box[0] y =", "= img[exp_box[1]:exp_box[3], exp_box[0]:exp_box[2]] baidu_result = call_baidu_api(cropped, '') baidu_lm = extract_baidu_lm72(baidu_result[0][-1]) for x, y", "iter_num): model_path = os.path.join(model_dir, 'landmark_iter_%d.caffemodel' % iter_num) proto_path = 'landmark.prototxt' return caffe.Net(proto_path, model_path,", "= os.path.join(model_dir, 'landmark_iter_%d.caffemodel' % iter_num) proto_path = 'landmark.prototxt' return caffe.Net(proto_path, model_path, caffe.TEST) if", "cropped = (cropped - 127.5) / 127.5 net.blobs['data'].data[0] = cropped out = net.forward()", "is_valid_facebox, extract_baidu_lm72 from baidu import call_baidu_api def create_net(model_dir, iter_num): model_path = os.path.join(model_dir, 'landmark_iter_%d.caffemodel'", "= mtcnn(img_path) for box in boxes: if not is_valid_facebox(box): continue exp_box = expand_mtcnn_box(img,", "cv2.imread(img_path) net = create_net(model_dir, iter_num) mtcnn = fast_mtcnn() boxes = mtcnn(img_path) for box", "0), 1) h, w, _ = cropped.shape cropped = cv2.resize(cropped, (config.IMG_SIZE, config.IMG_SIZE)) cropped", "1, (255, 0, 0), 1) h, w, _ = cropped.shape cropped = cv2.resize(cropped,", "len(sys.argv) > 3: model_dir = sys.argv[3] img = cv2.imread(img_path) net = create_net(model_dir, iter_num)", "call_baidu_api(cropped, '') baidu_lm = extract_baidu_lm72(baidu_result[0][-1]) for x, y in baidu_lm: x = int(x", "= np.swapaxes(cropped, 0, 2) cropped = (cropped - 127.5) / 127.5 net.blobs['data'].data[0] =", "config.MODEL_DIR if len(sys.argv) > 3: model_dir = sys.argv[3] img = 
cv2.imread(img_path) net =", "os.path.join(model_dir, 'landmark_iter_%d.caffemodel' % iter_num) proto_path = 'landmark.prototxt' return caffe.Net(proto_path, model_path, caffe.TEST) if __name__", "(255, 0, 0), 1) h, w, _ = cropped.shape cropped = cv2.resize(cropped, (config.IMG_SIZE,", "(config.IMG_SIZE, config.IMG_SIZE)) cropped = np.swapaxes(cropped, 0, 2) cropped = (cropped - 127.5) /", "2) cropped = (cropped - 127.5) / 127.5 net.blobs['data'].data[0] = cropped out =", "cropped out = net.forward() landmark = out['Dense2'][0] for pt in landmark.reshape((config.LANDMARK_SIZE, 2)): x,", "y in baidu_lm: x = int(x + exp_box[0]) y = int(y + exp_box[1])", "import call_baidu_api def create_net(model_dir, iter_num): model_path = os.path.join(model_dir, 'landmark_iter_%d.caffemodel' % iter_num) proto_path =", "box) cropped = img[exp_box[1]:exp_box[3], exp_box[0]:exp_box[2]] baidu_result = call_baidu_api(cropped, '') baidu_lm = extract_baidu_lm72(baidu_result[0][-1]) for", "exp_box[0]) y = int(y + exp_box[1]) cv2.circle(img, (int(x), int(y)), 1, (255, 0, 0),", "= net.forward() landmark = out['Dense2'][0] for pt in landmark.reshape((config.LANDMARK_SIZE, 2)): x, y =", "in baidu_lm: x = int(x + exp_box[0]) y = int(y + exp_box[1]) cv2.circle(img,", "as np import config sys.path.append('../') from fast_mtcnn import fast_mtcnn from gen_landmark import expand_mtcnn_box,", "mtcnn(img_path) for box in boxes: if not is_valid_facebox(box): continue exp_box = expand_mtcnn_box(img, box)", "int(y + exp_box[1]) cv2.circle(img, (int(x), int(y)), 1, (255, 0, 0), 1) h, w,", "expand_mtcnn_box, is_valid_facebox, extract_baidu_lm72 from baidu import call_baidu_api def create_net(model_dir, iter_num): model_path = os.path.join(model_dir,", "baidu_lm: x = int(x + exp_box[0]) y = int(y + exp_box[1]) cv2.circle(img, (int(x),", "exp_box[0] y = y * h + exp_box[1] cv2.circle(img, (int(x), int(y)), 1, (255,", "return caffe.Net(proto_path, model_path, caffe.TEST) if __name__ == '__main__': iter_num = 
int(sys.argv[1]) img_path =", "= fast_mtcnn() boxes = mtcnn(img_path) for box in boxes: if not is_valid_facebox(box): continue", "net.blobs['data'].data[0] = cropped out = net.forward() landmark = out['Dense2'][0] for pt in landmark.reshape((config.LANDMARK_SIZE,", "exp_box[1]) cv2.circle(img, (int(x), int(y)), 1, (255, 0, 0), 1) h, w, _ =", "cropped = cv2.resize(cropped, (config.IMG_SIZE, config.IMG_SIZE)) cropped = np.swapaxes(cropped, 0, 2) cropped = (cropped", "import caffe import numpy as np import config sys.path.append('../') from fast_mtcnn import fast_mtcnn", "_ = cropped.shape cropped = cv2.resize(cropped, (config.IMG_SIZE, config.IMG_SIZE)) cropped = np.swapaxes(cropped, 0, 2)", "model_path = os.path.join(model_dir, 'landmark_iter_%d.caffemodel' % iter_num) proto_path = 'landmark.prototxt' return caffe.Net(proto_path, model_path, caffe.TEST)", "= cropped out = net.forward() landmark = out['Dense2'][0] for pt in landmark.reshape((config.LANDMARK_SIZE, 2)):", "iter_num) proto_path = 'landmark.prototxt' return caffe.Net(proto_path, model_path, caffe.TEST) if __name__ == '__main__': iter_num", "= call_baidu_api(cropped, '') baidu_lm = extract_baidu_lm72(baidu_result[0][-1]) for x, y in baidu_lm: x =", "boxes: if not is_valid_facebox(box): continue exp_box = expand_mtcnn_box(img, box) cropped = img[exp_box[1]:exp_box[3], exp_box[0]:exp_box[2]]", "(int(x), int(y)), 1, (255, 0, 0), 1) h, w, _ = cropped.shape cropped", "+ exp_box[0]) y = int(y + exp_box[1]) cv2.circle(img, (int(x), int(y)), 1, (255, 0,", "= cropped.shape cropped = cv2.resize(cropped, (config.IMG_SIZE, config.IMG_SIZE)) cropped = np.swapaxes(cropped, 0, 2) cropped", "/ 127.5 net.blobs['data'].data[0] = cropped out = net.forward() landmark = out['Dense2'][0] for pt", "y = y * h + exp_box[1] cv2.circle(img, (int(x), int(y)), 1, (255, 255,", "extract_baidu_lm72(baidu_result[0][-1]) for x, y in baidu_lm: x = int(x + exp_box[0]) y =", "sys.argv[3] img = cv2.imread(img_path) net = create_net(model_dir, 
iter_num) mtcnn = fast_mtcnn() boxes =", "+ exp_box[0] y = y * h + exp_box[1] cv2.circle(img, (int(x), int(y)), 1,", "y = int(y + exp_box[1]) cv2.circle(img, (int(x), int(y)), 1, (255, 0, 0), 1)", "(cropped - 127.5) / 127.5 net.blobs['data'].data[0] = cropped out = net.forward() landmark =", "in landmark.reshape((config.LANDMARK_SIZE, 2)): x, y = pt x = x * w +", "0, 0), 1) h, w, _ = cropped.shape cropped = cv2.resize(cropped, (config.IMG_SIZE, config.IMG_SIZE))", "+ exp_box[1] cv2.circle(img, (int(x), int(y)), 1, (255, 255, 0), 1) time.sleep(0.5) cv2.imwrite('result.jpg', img)", "fast_mtcnn from gen_landmark import expand_mtcnn_box, is_valid_facebox, extract_baidu_lm72 from baidu import call_baidu_api def create_net(model_dir,", "127.5) / 127.5 net.blobs['data'].data[0] = cropped out = net.forward() landmark = out['Dense2'][0] for", "in boxes: if not is_valid_facebox(box): continue exp_box = expand_mtcnn_box(img, box) cropped = img[exp_box[1]:exp_box[3],", "import os import sys import cv2 import time import caffe import numpy as", "os import sys import cv2 import time import caffe import numpy as np", "* w + exp_box[0] y = y * h + exp_box[1] cv2.circle(img, (int(x),", "time import caffe import numpy as np import config sys.path.append('../') from fast_mtcnn import", "if __name__ == '__main__': iter_num = int(sys.argv[1]) img_path = sys.argv[2] model_dir = config.MODEL_DIR", "np.swapaxes(cropped, 0, 2) cropped = (cropped - 127.5) / 127.5 net.blobs['data'].data[0] = cropped", "np import config sys.path.append('../') from fast_mtcnn import fast_mtcnn from gen_landmark import expand_mtcnn_box, is_valid_facebox,", "int(x + exp_box[0]) y = int(y + exp_box[1]) cv2.circle(img, (int(x), int(y)), 1, (255,", "x, y in baidu_lm: x = int(x + exp_box[0]) y = int(y +", "gen_landmark import expand_mtcnn_box, is_valid_facebox, extract_baidu_lm72 from baidu import call_baidu_api def create_net(model_dir, iter_num): model_path", "import time import caffe import numpy as np import 
config sys.path.append('../') from fast_mtcnn", "1) h, w, _ = cropped.shape cropped = cv2.resize(cropped, (config.IMG_SIZE, config.IMG_SIZE)) cropped =", "create_net(model_dir, iter_num) mtcnn = fast_mtcnn() boxes = mtcnn(img_path) for box in boxes: if", "if len(sys.argv) > 3: model_dir = sys.argv[3] img = cv2.imread(img_path) net = create_net(model_dir,", "caffe import numpy as np import config sys.path.append('../') from fast_mtcnn import fast_mtcnn from", "baidu_result = call_baidu_api(cropped, '') baidu_lm = extract_baidu_lm72(baidu_result[0][-1]) for x, y in baidu_lm: x", "numpy as np import config sys.path.append('../') from fast_mtcnn import fast_mtcnn from gen_landmark import", "net = create_net(model_dir, iter_num) mtcnn = fast_mtcnn() boxes = mtcnn(img_path) for box in", "def create_net(model_dir, iter_num): model_path = os.path.join(model_dir, 'landmark_iter_%d.caffemodel' % iter_num) proto_path = 'landmark.prototxt' return", "landmark = out['Dense2'][0] for pt in landmark.reshape((config.LANDMARK_SIZE, 2)): x, y = pt x", "x = int(x + exp_box[0]) y = int(y + exp_box[1]) cv2.circle(img, (int(x), int(y)),", "cropped = np.swapaxes(cropped, 0, 2) cropped = (cropped - 127.5) / 127.5 net.blobs['data'].data[0]", "int(sys.argv[1]) img_path = sys.argv[2] model_dir = config.MODEL_DIR if len(sys.argv) > 3: model_dir =", "fast_mtcnn import fast_mtcnn from gen_landmark import expand_mtcnn_box, is_valid_facebox, extract_baidu_lm72 from baidu import call_baidu_api", "0, 2) cropped = (cropped - 127.5) / 127.5 net.blobs['data'].data[0] = cropped out", "2)): x, y = pt x = x * w + exp_box[0] y" ]
[ "as github class Factory(object): \"\"\" Factory \"\"\" @staticmethod def create(track): \"\"\" git type", "patch_tracking.util.upstream.git as git import patch_tracking.util.upstream.github as github class Factory(object): \"\"\" Factory \"\"\" @staticmethod", "if track.version_control == 'github': return github.GitHub(track) if track.version_control == 'git': return git.Git(track) return", "import patch_tracking.util.upstream.github as github class Factory(object): \"\"\" Factory \"\"\" @staticmethod def create(track): \"\"\"", "\"\"\" Factory \"\"\" @staticmethod def create(track): \"\"\" git type \"\"\" if track.version_control ==", "github class Factory(object): \"\"\" Factory \"\"\" @staticmethod def create(track): \"\"\" git type \"\"\"", "import patch_tracking.util.upstream.git as git import patch_tracking.util.upstream.github as github class Factory(object): \"\"\" Factory \"\"\"", "Factory(object): \"\"\" Factory \"\"\" @staticmethod def create(track): \"\"\" git type \"\"\" if track.version_control", "as git import patch_tracking.util.upstream.github as github class Factory(object): \"\"\" Factory \"\"\" @staticmethod def", "class Factory(object): \"\"\" Factory \"\"\" @staticmethod def create(track): \"\"\" git type \"\"\" if", "git import patch_tracking.util.upstream.github as github class Factory(object): \"\"\" Factory \"\"\" @staticmethod def create(track):", "\"\"\" if track.version_control == 'github': return github.GitHub(track) if track.version_control == 'git': return git.Git(track)", "init\"\"\" import patch_tracking.util.upstream.git as git import patch_tracking.util.upstream.github as github class Factory(object): \"\"\" Factory", "type \"\"\" if track.version_control == 'github': return github.GitHub(track) if track.version_control == 'git': return", "create(track): \"\"\" git type \"\"\" if track.version_control == 'github': return github.GitHub(track) if track.version_control", "<reponame>openeuler-mirror/patch-tracking \"\"\"upstream 
init\"\"\" import patch_tracking.util.upstream.git as git import patch_tracking.util.upstream.github as github class Factory(object):", "git type \"\"\" if track.version_control == 'github': return github.GitHub(track) if track.version_control == 'git':", "\"\"\"upstream init\"\"\" import patch_tracking.util.upstream.git as git import patch_tracking.util.upstream.github as github class Factory(object): \"\"\"", "patch_tracking.util.upstream.github as github class Factory(object): \"\"\" Factory \"\"\" @staticmethod def create(track): \"\"\" git", "Factory \"\"\" @staticmethod def create(track): \"\"\" git type \"\"\" if track.version_control == 'github':", "\"\"\" @staticmethod def create(track): \"\"\" git type \"\"\" if track.version_control == 'github': return", "@staticmethod def create(track): \"\"\" git type \"\"\" if track.version_control == 'github': return github.GitHub(track)", "track.version_control == 'github': return github.GitHub(track) if track.version_control == 'git': return git.Git(track) return None", "\"\"\" git type \"\"\" if track.version_control == 'github': return github.GitHub(track) if track.version_control ==", "def create(track): \"\"\" git type \"\"\" if track.version_control == 'github': return github.GitHub(track) if" ]
[ "*[ nn.Sequential( nn.Linear(channels[i - 1], channels[i], bias=bias), nn.BatchNorm1d(channels[i], momentum=bn_momentum), activation, ) for i", "nn def MLP(channels, activation=nn.LeakyReLU(0.2), bn_momentum=0.1, bias=True): return nn.Sequential( *[ nn.Sequential( nn.Linear(channels[i - 1],", "- 1], channels[i], bias=bias), nn.BatchNorm1d(channels[i], momentum=bn_momentum), activation, ) for i in range(1, len(channels))", "1], channels[i], bias=bias), nn.BatchNorm1d(channels[i], momentum=bn_momentum), activation, ) for i in range(1, len(channels)) ]", "channels[i], bias=bias), nn.BatchNorm1d(channels[i], momentum=bn_momentum), activation, ) for i in range(1, len(channels)) ] )", "MLP(channels, activation=nn.LeakyReLU(0.2), bn_momentum=0.1, bias=True): return nn.Sequential( *[ nn.Sequential( nn.Linear(channels[i - 1], channels[i], bias=bias),", "nn.Sequential( nn.Linear(channels[i - 1], channels[i], bias=bias), nn.BatchNorm1d(channels[i], momentum=bn_momentum), activation, ) for i in", "torch import nn def MLP(channels, activation=nn.LeakyReLU(0.2), bn_momentum=0.1, bias=True): return nn.Sequential( *[ nn.Sequential( nn.Linear(channels[i", "<filename>pcdet/models/model_utils/basic_blocks.py from torch import nn def MLP(channels, activation=nn.LeakyReLU(0.2), bn_momentum=0.1, bias=True): return nn.Sequential( *[", "nn.Sequential( *[ nn.Sequential( nn.Linear(channels[i - 1], channels[i], bias=bias), nn.BatchNorm1d(channels[i], momentum=bn_momentum), activation, ) for", "return nn.Sequential( *[ nn.Sequential( nn.Linear(channels[i - 1], channels[i], bias=bias), nn.BatchNorm1d(channels[i], momentum=bn_momentum), activation, )", "import nn def MLP(channels, activation=nn.LeakyReLU(0.2), bn_momentum=0.1, bias=True): return nn.Sequential( *[ nn.Sequential( nn.Linear(channels[i -", "def MLP(channels, activation=nn.LeakyReLU(0.2), bn_momentum=0.1, bias=True): return nn.Sequential( *[ nn.Sequential( nn.Linear(channels[i - 1], channels[i],", "activation=nn.LeakyReLU(0.2), 
bn_momentum=0.1, bias=True): return nn.Sequential( *[ nn.Sequential( nn.Linear(channels[i - 1], channels[i], bias=bias), nn.BatchNorm1d(channels[i],", "bias=True): return nn.Sequential( *[ nn.Sequential( nn.Linear(channels[i - 1], channels[i], bias=bias), nn.BatchNorm1d(channels[i], momentum=bn_momentum), activation,", "from torch import nn def MLP(channels, activation=nn.LeakyReLU(0.2), bn_momentum=0.1, bias=True): return nn.Sequential( *[ nn.Sequential(", "nn.Linear(channels[i - 1], channels[i], bias=bias), nn.BatchNorm1d(channels[i], momentum=bn_momentum), activation, ) for i in range(1,", "bn_momentum=0.1, bias=True): return nn.Sequential( *[ nn.Sequential( nn.Linear(channels[i - 1], channels[i], bias=bias), nn.BatchNorm1d(channels[i], momentum=bn_momentum)," ]
[ "TypingGameColorMixin: \"\"\" Settings of colors.\"\"\" __slots__ = () TYPING_CORRECT_COLOR = RGBColor.GREEN TYPING_CUR_POS_COLOR =", "\"\"\" Settings of colors.\"\"\" __slots__ = () TYPING_CORRECT_COLOR = RGBColor.GREEN TYPING_CUR_POS_COLOR = RGBColor.BLUE", "__slots__ = () TYPING_CORRECT_COLOR = RGBColor.GREEN TYPING_CUR_POS_COLOR = RGBColor.BLUE TYPING_MODIFY_COLOR = RGBColor.YELLOW TYPING_ERROR_COLOR", "import RGBColor class TypingGameColorMixin: \"\"\" Settings of colors.\"\"\" __slots__ = () TYPING_CORRECT_COLOR =", "colors.\"\"\" __slots__ = () TYPING_CORRECT_COLOR = RGBColor.GREEN TYPING_CUR_POS_COLOR = RGBColor.BLUE TYPING_MODIFY_COLOR = RGBColor.YELLOW", "= () TYPING_CORRECT_COLOR = RGBColor.GREEN TYPING_CUR_POS_COLOR = RGBColor.BLUE TYPING_MODIFY_COLOR = RGBColor.YELLOW TYPING_ERROR_COLOR =", "RGBColor class TypingGameColorMixin: \"\"\" Settings of colors.\"\"\" __slots__ = () TYPING_CORRECT_COLOR = RGBColor.GREEN", "() TYPING_CORRECT_COLOR = RGBColor.GREEN TYPING_CUR_POS_COLOR = RGBColor.BLUE TYPING_MODIFY_COLOR = RGBColor.YELLOW TYPING_ERROR_COLOR = RGBColor.RED", "from typing_game.api.generics import RGBColor class TypingGameColorMixin: \"\"\" Settings of colors.\"\"\" __slots__ = ()", "Settings of colors.\"\"\" __slots__ = () TYPING_CORRECT_COLOR = RGBColor.GREEN TYPING_CUR_POS_COLOR = RGBColor.BLUE TYPING_MODIFY_COLOR", "typing_game.api.generics import RGBColor class TypingGameColorMixin: \"\"\" Settings of colors.\"\"\" __slots__ = () TYPING_CORRECT_COLOR", "<filename>typing_game/api/mixins/colors.py<gh_stars>0 from typing_game.api.generics import RGBColor class TypingGameColorMixin: \"\"\" Settings of colors.\"\"\" __slots__ =", "of colors.\"\"\" __slots__ = () TYPING_CORRECT_COLOR = RGBColor.GREEN TYPING_CUR_POS_COLOR = RGBColor.BLUE TYPING_MODIFY_COLOR =", "class TypingGameColorMixin: \"\"\" Settings of colors.\"\"\" __slots__ = () TYPING_CORRECT_COLOR = RGBColor.GREEN TYPING_CUR_POS_COLOR" ]
[ "links = tree.xpath( '//link[@rel=\"alternate\"][contains(@type, \"rss\") or ' + 'contains(@type, \"atom\") or contains(@type, \"rdf\")]')", "as f: users = yaml.safe_load(f.read()) def fetch_links(url): req = urllib2.Request(url, headers=HEADERS) tree =", "#!/usr/bin/python from lxml import html import yaml import sys import urllib2 import urlparse", "datetime print 'Import feeds at ' + str(datetime.now()) HEADERS = {'User-Agent': 'Mozilla/5.0 (Macintosh;", ">>sys.stderr, \"No link found for %s\" % (url,) return None for (name, u)", "else: print >>sys.stderr, \"No link found for %s\" % (url,) return None for", "not link.startswith('http:'): link = urlparse.urljoin(url, link) e.append(link) with open('bloggers.yml', 'w') as f: yaml.safe_dump(users,", "l.attrib.get('title','')] if candidates: return candidates[0].attrib['href'] elif links: return links[0].attrib['href'] else: print >>sys.stderr, \"No", "(title, url) = e[0:2] e[0] = e[0].strip() if len(e) == 3: continue link", "lxml import html import yaml import sys import urllib2 import urlparse from datetime", "return candidates[0].attrib['href'] elif links: return links[0].attrib['href'] else: print >>sys.stderr, \"No link found for", "= urllib2.Request(url, headers=HEADERS) tree = html.fromstring(urllib2.urlopen(req).read()) links = tree.xpath( '//link[@rel=\"alternate\"][contains(@type, \"rss\") or '", "in users.items(): for e in u['links']: (title, url) = e[0:2] e[0] = e[0].strip()", "f: users = yaml.safe_load(f.read()) def fetch_links(url): req = urllib2.Request(url, headers=HEADERS) tree = html.fromstring(urllib2.urlopen(req).read())", "'Import feeds at ' + str(datetime.now()) HEADERS = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac", "= e[0:2] e[0] = e[0].strip() if len(e) == 3: continue link = fetch_links(url)", "for %s\" % (url,) return None for (name, u) in users.items(): for e", "yaml import sys import urllib2 import urlparse from datetime import datetime print 'Import", "{'User-Agent': 'Mozilla/5.0 
(Macintosh; Intel Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0'} with open('bloggers.yml')", "[l for l in links if 'atom' in l.attrib['type'] and 'comments' not in", "feeds at ' + str(datetime.now()) HEADERS = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS", "(url,) return None for (name, u) in users.items(): for e in u['links']: (title,", "'atom' in l.attrib['type'] and 'comments' not in l.attrib['href'].lower() and 'comments' not in l.attrib.get('title','')]", "for l in links if 'atom' in l.attrib['type'] and 'comments' not in l.attrib['href'].lower()", "or contains(@type, \"rdf\")]') candidates = [l for l in links if 'atom' in", "Intel Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0'} with open('bloggers.yml') as f: users", "'comments' not in l.attrib['href'].lower() and 'comments' not in l.attrib.get('title','')] if candidates: return candidates[0].attrib['href']", "return links[0].attrib['href'] else: print >>sys.stderr, \"No link found for %s\" % (url,) return", "not in l.attrib.get('title','')] if candidates: return candidates[0].attrib['href'] elif links: return links[0].attrib['href'] else: print", "rv:11.0) Gecko/20100101 Firefox/11.0'} with open('bloggers.yml') as f: users = yaml.safe_load(f.read()) def fetch_links(url): req", "'comments' not in l.attrib.get('title','')] if candidates: return candidates[0].attrib['href'] elif links: return links[0].attrib['href'] else:", "def fetch_links(url): req = urllib2.Request(url, headers=HEADERS) tree = html.fromstring(urllib2.urlopen(req).read()) links = tree.xpath( '//link[@rel=\"alternate\"][contains(@type,", "candidates[0].attrib['href'] elif links: return links[0].attrib['href'] else: print >>sys.stderr, \"No link found for %s\"", "at ' + str(datetime.now()) HEADERS = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X", "in l.attrib.get('title','')] if candidates: return candidates[0].attrib['href'] elif links: return links[0].attrib['href'] else: print >>sys.stderr,", "headers=HEADERS) 
tree = html.fromstring(urllib2.urlopen(req).read()) links = tree.xpath( '//link[@rel=\"alternate\"][contains(@type, \"rss\") or ' + 'contains(@type,", "html import yaml import sys import urllib2 import urlparse from datetime import datetime", "and 'comments' not in l.attrib['href'].lower() and 'comments' not in l.attrib.get('title','')] if candidates: return", "continue link = fetch_links(url) if link: if not link.startswith('http:'): link = urlparse.urljoin(url, link)", "+ str(datetime.now()) HEADERS = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:11.0)", "l.attrib['type'] and 'comments' not in l.attrib['href'].lower() and 'comments' not in l.attrib.get('title','')] if candidates:", "= {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0'} with", "users.items(): for e in u['links']: (title, url) = e[0:2] e[0] = e[0].strip() if", "str(datetime.now()) HEADERS = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:11.0) Gecko/20100101", "tree = html.fromstring(urllib2.urlopen(req).read()) links = tree.xpath( '//link[@rel=\"alternate\"][contains(@type, \"rss\") or ' + 'contains(@type, \"atom\")", "in l.attrib['type'] and 'comments' not in l.attrib['href'].lower() and 'comments' not in l.attrib.get('title','')] if", "e[0].strip() if len(e) == 3: continue link = fetch_links(url) if link: if not", "urllib2.Request(url, headers=HEADERS) tree = html.fromstring(urllib2.urlopen(req).read()) links = tree.xpath( '//link[@rel=\"alternate\"][contains(@type, \"rss\") or ' +", "' + str(datetime.now()) HEADERS = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7;", "len(e) == 3: continue link = fetch_links(url) if link: if not link.startswith('http:'): link", "req = urllib2.Request(url, headers=HEADERS) tree = html.fromstring(urllib2.urlopen(req).read()) links = tree.xpath( '//link[@rel=\"alternate\"][contains(@type, \"rss\") or", "= html.fromstring(urllib2.urlopen(req).read()) links = tree.xpath( 
'//link[@rel=\"alternate\"][contains(@type, \"rss\") or ' + 'contains(@type, \"atom\") or", "open('bloggers.yml') as f: users = yaml.safe_load(f.read()) def fetch_links(url): req = urllib2.Request(url, headers=HEADERS) tree", "%s\" % (url,) return None for (name, u) in users.items(): for e in", "(name, u) in users.items(): for e in u['links']: (title, url) = e[0:2] e[0]", "if link: if not link.startswith('http:'): link = urlparse.urljoin(url, link) e.append(link) with open('bloggers.yml', 'w')", "link = fetch_links(url) if link: if not link.startswith('http:'): link = urlparse.urljoin(url, link) e.append(link)", "'contains(@type, \"atom\") or contains(@type, \"rdf\")]') candidates = [l for l in links if", "found for %s\" % (url,) return None for (name, u) in users.items(): for", "e[0:2] e[0] = e[0].strip() if len(e) == 3: continue link = fetch_links(url) if", "print 'Import feeds at ' + str(datetime.now()) HEADERS = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel", "candidates = [l for l in links if 'atom' in l.attrib['type'] and 'comments'", "None for (name, u) in users.items(): for e in u['links']: (title, url) =", "\"rss\") or ' + 'contains(@type, \"atom\") or contains(@type, \"rdf\")]') candidates = [l for", "import html import yaml import sys import urllib2 import urlparse from datetime import", "url) = e[0:2] e[0] = e[0].strip() if len(e) == 3: continue link =", "X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0'} with open('bloggers.yml') as f: users = yaml.safe_load(f.read()) def", "if 'atom' in l.attrib['type'] and 'comments' not in l.attrib['href'].lower() and 'comments' not in", "links: return links[0].attrib['href'] else: print >>sys.stderr, \"No link found for %s\" % (url,)", "= [l for l in links if 'atom' in l.attrib['type'] and 'comments' not", "html.fromstring(urllib2.urlopen(req).read()) links = tree.xpath( '//link[@rel=\"alternate\"][contains(@type, \"rss\") or ' + 'contains(@type, \"atom\") or contains(@type,", "% (url,) return None for (name, u) in 
users.items(): for e in u['links']:", "not in l.attrib['href'].lower() and 'comments' not in l.attrib.get('title','')] if candidates: return candidates[0].attrib['href'] elif", "datetime import datetime print 'Import feeds at ' + str(datetime.now()) HEADERS = {'User-Agent':", "\"No link found for %s\" % (url,) return None for (name, u) in", "sys import urllib2 import urlparse from datetime import datetime print 'Import feeds at", "Firefox/11.0'} with open('bloggers.yml') as f: users = yaml.safe_load(f.read()) def fetch_links(url): req = urllib2.Request(url,", "OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0'} with open('bloggers.yml') as f: users = yaml.safe_load(f.read())", "HEADERS = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0'}", "import urllib2 import urlparse from datetime import datetime print 'Import feeds at '", "== 3: continue link = fetch_links(url) if link: if not link.startswith('http:'): link =", "10.7; rv:11.0) Gecko/20100101 Firefox/11.0'} with open('bloggers.yml') as f: users = yaml.safe_load(f.read()) def fetch_links(url):", "'//link[@rel=\"alternate\"][contains(@type, \"rss\") or ' + 'contains(@type, \"atom\") or contains(@type, \"rdf\")]') candidates = [l", "link: if not link.startswith('http:'): link = urlparse.urljoin(url, link) e.append(link) with open('bloggers.yml', 'w') as", "if not link.startswith('http:'): link = urlparse.urljoin(url, link) e.append(link) with open('bloggers.yml', 'w') as f:", "with open('bloggers.yml') as f: users = yaml.safe_load(f.read()) def fetch_links(url): req = urllib2.Request(url, headers=HEADERS)", "+ 'contains(@type, \"atom\") or contains(@type, \"rdf\")]') candidates = [l for l in links", "in u['links']: (title, url) = e[0:2] e[0] = e[0].strip() if len(e) == 3:", "link found for %s\" % (url,) return None for (name, u) in users.items():", "l.attrib['href'].lower() and 'comments' not in l.attrib.get('title','')] if candidates: return 
candidates[0].attrib['href'] elif links: return", "tree.xpath( '//link[@rel=\"alternate\"][contains(@type, \"rss\") or ' + 'contains(@type, \"atom\") or contains(@type, \"rdf\")]') candidates =", "print >>sys.stderr, \"No link found for %s\" % (url,) return None for (name,", "for (name, u) in users.items(): for e in u['links']: (title, url) = e[0:2]", "in links if 'atom' in l.attrib['type'] and 'comments' not in l.attrib['href'].lower() and 'comments'", "= e[0].strip() if len(e) == 3: continue link = fetch_links(url) if link: if", "= tree.xpath( '//link[@rel=\"alternate\"][contains(@type, \"rss\") or ' + 'contains(@type, \"atom\") or contains(@type, \"rdf\")]') candidates", "= fetch_links(url) if link: if not link.startswith('http:'): link = urlparse.urljoin(url, link) e.append(link) with", "fetch_links(url): req = urllib2.Request(url, headers=HEADERS) tree = html.fromstring(urllib2.urlopen(req).read()) links = tree.xpath( '//link[@rel=\"alternate\"][contains(@type, \"rss\")", "return None for (name, u) in users.items(): for e in u['links']: (title, url)", "l in links if 'atom' in l.attrib['type'] and 'comments' not in l.attrib['href'].lower() and", "for e in u['links']: (title, url) = e[0:2] e[0] = e[0].strip() if len(e)", "import yaml import sys import urllib2 import urlparse from datetime import datetime print", "Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0'} with open('bloggers.yml') as f: users =", "import sys import urllib2 import urlparse from datetime import datetime print 'Import feeds", "import datetime print 'Import feeds at ' + str(datetime.now()) HEADERS = {'User-Agent': 'Mozilla/5.0", "(Macintosh; Intel Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0'} with open('bloggers.yml') as f:", "links if 'atom' in l.attrib['type'] and 'comments' not in l.attrib['href'].lower() and 'comments' not", "urllib2 import urlparse from datetime import datetime print 'Import feeds at ' +", "3: continue link = fetch_links(url) if link: if not 
link.startswith('http:'): link = urlparse.urljoin(url,", "fetch_links(url) if link: if not link.startswith('http:'): link = urlparse.urljoin(url, link) e.append(link) with open('bloggers.yml',", "candidates: return candidates[0].attrib['href'] elif links: return links[0].attrib['href'] else: print >>sys.stderr, \"No link found", "u) in users.items(): for e in u['links']: (title, url) = e[0:2] e[0] =", "e in u['links']: (title, url) = e[0:2] e[0] = e[0].strip() if len(e) ==", "\"atom\") or contains(@type, \"rdf\")]') candidates = [l for l in links if 'atom'", "u['links']: (title, url) = e[0:2] e[0] = e[0].strip() if len(e) == 3: continue", "from datetime import datetime print 'Import feeds at ' + str(datetime.now()) HEADERS =", "\"rdf\")]') candidates = [l for l in links if 'atom' in l.attrib['type'] and", "links[0].attrib['href'] else: print >>sys.stderr, \"No link found for %s\" % (url,) return None", "in l.attrib['href'].lower() and 'comments' not in l.attrib.get('title','')] if candidates: return candidates[0].attrib['href'] elif links:", "Gecko/20100101 Firefox/11.0'} with open('bloggers.yml') as f: users = yaml.safe_load(f.read()) def fetch_links(url): req =", "users = yaml.safe_load(f.read()) def fetch_links(url): req = urllib2.Request(url, headers=HEADERS) tree = html.fromstring(urllib2.urlopen(req).read()) links", "or ' + 'contains(@type, \"atom\") or contains(@type, \"rdf\")]') candidates = [l for l", "link.startswith('http:'): link = urlparse.urljoin(url, link) e.append(link) with open('bloggers.yml', 'w') as f: yaml.safe_dump(users, f)", "' + 'contains(@type, \"atom\") or contains(@type, \"rdf\")]') candidates = [l for l in", "from lxml import html import yaml import sys import urllib2 import urlparse from", "if candidates: return candidates[0].attrib['href'] elif links: return links[0].attrib['href'] else: print >>sys.stderr, \"No link", "= yaml.safe_load(f.read()) def fetch_links(url): req = urllib2.Request(url, headers=HEADERS) tree = 
html.fromstring(urllib2.urlopen(req).read()) links =", "e[0] = e[0].strip() if len(e) == 3: continue link = fetch_links(url) if link:", "'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0'} with open('bloggers.yml') as", "and 'comments' not in l.attrib.get('title','')] if candidates: return candidates[0].attrib['href'] elif links: return links[0].attrib['href']", "if len(e) == 3: continue link = fetch_links(url) if link: if not link.startswith('http:'):", "urlparse from datetime import datetime print 'Import feeds at ' + str(datetime.now()) HEADERS", "import urlparse from datetime import datetime print 'Import feeds at ' + str(datetime.now())", "contains(@type, \"rdf\")]') candidates = [l for l in links if 'atom' in l.attrib['type']", "elif links: return links[0].attrib['href'] else: print >>sys.stderr, \"No link found for %s\" %", "yaml.safe_load(f.read()) def fetch_links(url): req = urllib2.Request(url, headers=HEADERS) tree = html.fromstring(urllib2.urlopen(req).read()) links = tree.xpath(" ]
[ "cat_names[encoder.inverse_transform(clf_pred)[0]] news = pd.read_csv(\"data/uci-news-aggregator.csv\") # let's take a look at our data #Normalize", "#Normalize the title news['TEXT'] = [normalize_text(s) for s in news['TITLE']] news['CATEGORY'].unique() # pull", "#Function to normalize the text def normalize_text(s): #lower-case the text s = s.lower()", "for s in news['TITLE']] news['CATEGORY'].unique() # pull the data into vectors vectorizer =", "encoder.fit_transform(news['CATEGORY']) # split into train and test sets x_train, x_test, y_train, y_test =", "y, test_size=0.2) # Instantiate the classifier: clf clf = RandomForestClassifier() # Fit the", "2018 @author: sshekhar \"\"\" # get some libraries that will be useful import", "# data processing, CSV file I/O (e.g. pd.read_csv) from sklearn.ensemble import RandomForestClassifier #", "Created on Fri Aug 10 17:37:56 2018 @author: sshekhar \"\"\" # get some", "in string.punctuation: s = s.replace(ch, \" \") # make sure we didn't introduce", "= LabelEncoder() y = encoder.fit_transform(news['CATEGORY']) # split into train and test sets x_train,", "pd.read_csv(\"data/uci-news-aggregator.csv\") # let's take a look at our data #Normalize the title news['TEXT']", "re.sub('\\s+',' ',s) s = re.sub(\"[0-9]+\", \"||DIG||\",s) s = re.sub(' +',' ', s) return", "function for encoding categories from sklearn.preprocessing import LabelEncoder from sklearn.metrics import confusion_matrix from", "normalize the text def normalize_text(s): #lower-case the text s = s.lower() # remove", "RandomForestClassifier # function to split the data for cross-validation from sklearn.model_selection import train_test_split", "split into train and test sets x_train, x_test, y_train, y_test = train_test_split(x, y,", "re import string import numpy as np # linear algebra import pandas as", "CountVectorizer # function for encoding categories from sklearn.preprocessing import LabelEncoder from sklearn.metrics import", "# pull the data into vectors 
vectorizer = CountVectorizer() x = vectorizer.fit_transform(news['TEXT']) encoder", "{'b' : 'business', 't' : 'science and technology', 'e' : 'entertainment', 'm' :", "import re import string import numpy as np # linear algebra import pandas", "sklearn.model_selection import train_test_split # function for transforming documents into counts from sklearn.feature_extraction.text import", "the training data clf.fit(x_train, y_train) # Print the accuracy print(\"Accuracy: {}\".format(clf.score(x_test, y_test))) x_test_clv_pred", "clf clf = RandomForestClassifier() # Fit the classifier to the training data clf.fit(x_train,", "sklearn.model_selection import cross_val_score #Function to normalize the text def normalize_text(s): #lower-case the text", "#lower-case the text s = s.lower() # remove punctuation that is not word-internal", "train_test_split # function for transforming documents into counts from sklearn.feature_extraction.text import CountVectorizer #", "news['TEXT'] = [normalize_text(s) for s in news['TITLE']] news['CATEGORY'].unique() # pull the data into", "= clf.predict(x_test) confusion_matrix(y_test, x_test_clv_pred) print(classification_report(y_test, x_test_clv_pred, target_names=encoder.classes_)) randomtitle=\"vehicular pollution - a big hazard", "# Instantiate the classifier: clf clf = RandomForestClassifier() # Fit the classifier to", "for ch in string.punctuation: s = s.replace(ch, \" \") # make sure we", "for a given title def predict_cat(title): title=title.lower() cat_names = {'b' : 'business', 't'", "{}\".format(clf.score(x_test, y_test))) x_test_clv_pred = clf.predict(x_test) confusion_matrix(y_test, x_test_clv_pred) print(classification_report(y_test, x_test_clv_pred, target_names=encoder.classes_)) randomtitle=\"vehicular pollution -", "test sets x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2) # Instantiate the", "our data #Normalize the title news['TEXT'] = [normalize_text(s) for s in news['TITLE']] 
news['CATEGORY'].unique()", "confusion_matrix(y_test, x_test_clv_pred) print(classification_report(y_test, x_test_clv_pred, target_names=encoder.classes_)) randomtitle=\"vehicular pollution - a big hazard for children\"", "CSV file I/O (e.g. pd.read_csv) from sklearn.ensemble import RandomForestClassifier # function to split", "the category for a given title def predict_cat(title): title=title.lower() cat_names = {'b' :", "# function for encoding categories from sklearn.preprocessing import LabelEncoder from sklearn.metrics import confusion_matrix", "encoding categories from sklearn.preprocessing import LabelEncoder from sklearn.metrics import confusion_matrix from sklearn.metrics import", "\" \") # make sure we didn't introduce any double spaces s =", "sklearn.ensemble import RandomForestClassifier # function to split the data for cross-validation from sklearn.model_selection", "= {'b' : 'business', 't' : 'science and technology', 'e' : 'entertainment', 'm'", "title news['TEXT'] = [normalize_text(s) for s in news['TITLE']] news['CATEGORY'].unique() # pull the data", "import RandomForestClassifier # function to split the data for cross-validation from sklearn.model_selection import", "s = s.lower() # remove punctuation that is not word-internal (e.g., hyphens, apostrophes)", "classifier to the training data clf.fit(x_train, y_train) # Print the accuracy print(\"Accuracy: {}\".format(clf.score(x_test,", "ch in string.punctuation: s = s.replace(ch, \" \") # make sure we didn't", "clf.predict(vectorizer.transform([title])) return cat_names[encoder.inverse_transform(clf_pred)[0]] news = pd.read_csv(\"data/uci-news-aggregator.csv\") # let's take a look at our", "y_test))) x_test_clv_pred = clf.predict(x_test) confusion_matrix(y_test, x_test_clv_pred) print(classification_report(y_test, x_test_clv_pred, target_names=encoder.classes_)) randomtitle=\"vehicular pollution - a", "will be useful import re import string import numpy as np # linear", "normalize_text(s): 
#lower-case the text s = s.lower() # remove punctuation that is not", "s = re.sub(\"[0-9]+\", \"||DIG||\",s) s = re.sub(' +',' ', s) return s #Function", "x_test_clv_pred = clf.predict(x_test) confusion_matrix(y_test, x_test_clv_pred) print(classification_report(y_test, x_test_clv_pred, target_names=encoder.classes_)) randomtitle=\"vehicular pollution - a big", "sklearn.metrics import classification_report from sklearn.model_selection import cross_val_score #Function to normalize the text def", "useful import re import string import numpy as np # linear algebra import", "for transforming documents into counts from sklearn.feature_extraction.text import CountVectorizer # function for encoding", "text def normalize_text(s): #lower-case the text s = s.lower() # remove punctuation that", "# remove punctuation that is not word-internal (e.g., hyphens, apostrophes) for ch in", "(e.g., hyphens, apostrophes) for ch in string.punctuation: s = s.replace(ch, \" \") #", "data processing, CSV file I/O (e.g. 
pd.read_csv) from sklearn.ensemble import RandomForestClassifier # function", "to split the data for cross-validation from sklearn.model_selection import train_test_split # function for", "s = re.sub(' +',' ', s) return s #Function to predict the category", "',s) s = re.sub(\"[0-9]+\", \"||DIG||\",s) s = re.sub(' +',' ', s) return s", "news = pd.read_csv(\"data/uci-news-aggregator.csv\") # let's take a look at our data #Normalize the", "look at our data #Normalize the title news['TEXT'] = [normalize_text(s) for s in", "the accuracy print(\"Accuracy: {}\".format(clf.score(x_test, y_test))) x_test_clv_pred = clf.predict(x_test) confusion_matrix(y_test, x_test_clv_pred) print(classification_report(y_test, x_test_clv_pred, target_names=encoder.classes_))", "# Fit the classifier to the training data clf.fit(x_train, y_train) # Print the", "data for cross-validation from sklearn.model_selection import train_test_split # function for transforming documents into", "the data for cross-validation from sklearn.model_selection import train_test_split # function for transforming documents", "classifier: clf clf = RandomForestClassifier() # Fit the classifier to the training data", "pd.read_csv) from sklearn.ensemble import RandomForestClassifier # function to split the data for cross-validation", "train and test sets x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2) #", "to predict the category for a given title def predict_cat(title): title=title.lower() cat_names =", "cross-validation from sklearn.model_selection import train_test_split # function for transforming documents into counts from", "from sklearn.preprocessing import LabelEncoder from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report from", "we didn't introduce any double spaces s = re.sub('\\s+',' ',s) s = re.sub(\"[0-9]+\",", "LabelEncoder() y = encoder.fit_transform(news['CATEGORY']) # split into train and test sets x_train, x_test,", "pd # data 
processing, CSV file I/O (e.g. pd.read_csv) from sklearn.ensemble import RandomForestClassifier", "CountVectorizer() x = vectorizer.fit_transform(news['TEXT']) encoder = LabelEncoder() y = encoder.fit_transform(news['CATEGORY']) # split into", "# split into train and test sets x_train, x_test, y_train, y_test = train_test_split(x,", "some libraries that will be useful import re import string import numpy as", "the classifier: clf clf = RandomForestClassifier() # Fit the classifier to the training", "be useful import re import string import numpy as np # linear algebra", "= train_test_split(x, y, test_size=0.2) # Instantiate the classifier: clf clf = RandomForestClassifier() #", "word-internal (e.g., hyphens, apostrophes) for ch in string.punctuation: s = s.replace(ch, \" \")", "any double spaces s = re.sub('\\s+',' ',s) s = re.sub(\"[0-9]+\", \"||DIG||\",s) s =", "'m' : 'health'} clf_pred = clf.predict(vectorizer.transform([title])) return cat_names[encoder.inverse_transform(clf_pred)[0]] news = pd.read_csv(\"data/uci-news-aggregator.csv\") # let's", "counts from sklearn.feature_extraction.text import CountVectorizer # function for encoding categories from sklearn.preprocessing import", "the text def normalize_text(s): #lower-case the text s = s.lower() # remove punctuation", "import confusion_matrix from sklearn.metrics import classification_report from sklearn.model_selection import cross_val_score #Function to normalize", "cross_val_score #Function to normalize the text def normalize_text(s): #lower-case the text s =", "given title def predict_cat(title): title=title.lower() cat_names = {'b' : 'business', 't' : 'science", "Instantiate the classifier: clf clf = RandomForestClassifier() # Fit the classifier to the", "Fit the classifier to the training data clf.fit(x_train, y_train) # Print the accuracy", "split the data for cross-validation from sklearn.model_selection import train_test_split # function for transforming", "function for transforming documents into 
counts from sklearn.feature_extraction.text import CountVectorizer # function for", "for encoding categories from sklearn.preprocessing import LabelEncoder from sklearn.metrics import confusion_matrix from sklearn.metrics", "x = vectorizer.fit_transform(news['TEXT']) encoder = LabelEncoder() y = encoder.fit_transform(news['CATEGORY']) # split into train", "utf-8 -*- \"\"\" Created on Fri Aug 10 17:37:56 2018 @author: sshekhar \"\"\"", "x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2) # Instantiate the classifier: clf", "\"\"\" # get some libraries that will be useful import re import string", "that is not word-internal (e.g., hyphens, apostrophes) for ch in string.punctuation: s =", "import classification_report from sklearn.model_selection import cross_val_score #Function to normalize the text def normalize_text(s):", "'entertainment', 'm' : 'health'} clf_pred = clf.predict(vectorizer.transform([title])) return cat_names[encoder.inverse_transform(clf_pred)[0]] news = pd.read_csv(\"data/uci-news-aggregator.csv\") #", "data #Normalize the title news['TEXT'] = [normalize_text(s) for s in news['TITLE']] news['CATEGORY'].unique() #", "didn't introduce any double spaces s = re.sub('\\s+',' ',s) s = re.sub(\"[0-9]+\", \"||DIG||\",s)", ": 'entertainment', 'm' : 'health'} clf_pred = clf.predict(vectorizer.transform([title])) return cat_names[encoder.inverse_transform(clf_pred)[0]] news = pd.read_csv(\"data/uci-news-aggregator.csv\")", "LabelEncoder from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report from sklearn.model_selection import cross_val_score", "from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report from sklearn.model_selection import cross_val_score #Function", "# function for transforming documents into counts from sklearn.feature_extraction.text import CountVectorizer # function", "sure we didn't introduce any double spaces s = re.sub('\\s+',' ',s) s =", 
"\"||DIG||\",s) s = re.sub(' +',' ', s) return s #Function to predict the", "re.sub(\"[0-9]+\", \"||DIG||\",s) s = re.sub(' +',' ', s) return s #Function to predict", "the data into vectors vectorizer = CountVectorizer() x = vectorizer.fit_transform(news['TEXT']) encoder = LabelEncoder()", "introduce any double spaces s = re.sub('\\s+',' ',s) s = re.sub(\"[0-9]+\", \"||DIG||\",s) s", ": 'business', 't' : 'science and technology', 'e' : 'entertainment', 'm' : 'health'}", "in news['TITLE']] news['CATEGORY'].unique() # pull the data into vectors vectorizer = CountVectorizer() x", "news['CATEGORY'].unique() # pull the data into vectors vectorizer = CountVectorizer() x = vectorizer.fit_transform(news['TEXT'])", "apostrophes) for ch in string.punctuation: s = s.replace(ch, \" \") # make sure", "at our data #Normalize the title news['TEXT'] = [normalize_text(s) for s in news['TITLE']]", "vectorizer.fit_transform(news['TEXT']) encoder = LabelEncoder() y = encoder.fit_transform(news['CATEGORY']) # split into train and test", "make sure we didn't introduce any double spaces s = re.sub('\\s+',' ',s) s", "import train_test_split # function for transforming documents into counts from sklearn.feature_extraction.text import CountVectorizer", "# let's take a look at our data #Normalize the title news['TEXT'] =", "into train and test sets x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)", "def normalize_text(s): #lower-case the text s = s.lower() # remove punctuation that is", "[normalize_text(s) for s in news['TITLE']] news['CATEGORY'].unique() # pull the data into vectors vectorizer", "# function to split the data for cross-validation from sklearn.model_selection import train_test_split #", "from sklearn.metrics import classification_report from sklearn.model_selection import cross_val_score #Function to normalize the text", "clf.predict(x_test) confusion_matrix(y_test, x_test_clv_pred) print(classification_report(y_test, x_test_clv_pred, 
target_names=encoder.classes_)) randomtitle=\"vehicular pollution - a big hazard for", "and test sets x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2) # Instantiate", "train_test_split(x, y, test_size=0.2) # Instantiate the classifier: clf clf = RandomForestClassifier() # Fit", "def predict_cat(title): title=title.lower() cat_names = {'b' : 'business', 't' : 'science and technology',", "file I/O (e.g. pd.read_csv) from sklearn.ensemble import RandomForestClassifier # function to split the", "Fri Aug 10 17:37:56 2018 @author: sshekhar \"\"\" # get some libraries that", "processing, CSV file I/O (e.g. pd.read_csv) from sklearn.ensemble import RandomForestClassifier # function to", "classification_report from sklearn.model_selection import cross_val_score #Function to normalize the text def normalize_text(s): #lower-case", "libraries that will be useful import re import string import numpy as np", "s = s.replace(ch, \" \") # make sure we didn't introduce any double", "re.sub(' +',' ', s) return s #Function to predict the category for a", "the title news['TEXT'] = [normalize_text(s) for s in news['TITLE']] news['CATEGORY'].unique() # pull the", "= RandomForestClassifier() # Fit the classifier to the training data clf.fit(x_train, y_train) #", "= re.sub('\\s+',' ',s) s = re.sub(\"[0-9]+\", \"||DIG||\",s) s = re.sub(' +',' ', s)", "transforming documents into counts from sklearn.feature_extraction.text import CountVectorizer # function for encoding categories", "that will be useful import re import string import numpy as np #", "RandomForestClassifier() # Fit the classifier to the training data clf.fit(x_train, y_train) # Print", "y_train) # Print the accuracy print(\"Accuracy: {}\".format(clf.score(x_test, y_test))) x_test_clv_pred = clf.predict(x_test) confusion_matrix(y_test, x_test_clv_pred)", "\"\"\" Created on Fri Aug 10 17:37:56 2018 @author: sshekhar \"\"\" # get", "s #Function to predict the category for a given title def 
predict_cat(title): title=title.lower()", "and technology', 'e' : 'entertainment', 'm' : 'health'} clf_pred = clf.predict(vectorizer.transform([title])) return cat_names[encoder.inverse_transform(clf_pred)[0]]", "data clf.fit(x_train, y_train) # Print the accuracy print(\"Accuracy: {}\".format(clf.score(x_test, y_test))) x_test_clv_pred = clf.predict(x_test)", "clf.fit(x_train, y_train) # Print the accuracy print(\"Accuracy: {}\".format(clf.score(x_test, y_test))) x_test_clv_pred = clf.predict(x_test) confusion_matrix(y_test,", "= vectorizer.fit_transform(news['TEXT']) encoder = LabelEncoder() y = encoder.fit_transform(news['CATEGORY']) # split into train and", "import cross_val_score #Function to normalize the text def normalize_text(s): #lower-case the text s", "to normalize the text def normalize_text(s): #lower-case the text s = s.lower() #", "# get some libraries that will be useful import re import string import", "= s.replace(ch, \" \") # make sure we didn't introduce any double spaces", ": 'health'} clf_pred = clf.predict(vectorizer.transform([title])) return cat_names[encoder.inverse_transform(clf_pred)[0]] news = pd.read_csv(\"data/uci-news-aggregator.csv\") # let's take", "sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report from sklearn.model_selection import cross_val_score #Function to", "a look at our data #Normalize the title news['TEXT'] = [normalize_text(s) for s", "sets x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2) # Instantiate the classifier:", "17:37:56 2018 @author: sshekhar \"\"\" # get some libraries that will be useful", "y_test = train_test_split(x, y, test_size=0.2) # Instantiate the classifier: clf clf = RandomForestClassifier()", "= clf.predict(vectorizer.transform([title])) return cat_names[encoder.inverse_transform(clf_pred)[0]] news = pd.read_csv(\"data/uci-news-aggregator.csv\") # let's take a look at", "technology', 'e' : 'entertainment', 'm' : 'health'} clf_pred = 
clf.predict(vectorizer.transform([title])) return cat_names[encoder.inverse_transform(clf_pred)[0]] news", "text s = s.lower() # remove punctuation that is not word-internal (e.g., hyphens,", "'science and technology', 'e' : 'entertainment', 'm' : 'health'} clf_pred = clf.predict(vectorizer.transform([title])) return", "not word-internal (e.g., hyphens, apostrophes) for ch in string.punctuation: s = s.replace(ch, \"", "-*- coding: utf-8 -*- \"\"\" Created on Fri Aug 10 17:37:56 2018 @author:", "pull the data into vectors vectorizer = CountVectorizer() x = vectorizer.fit_transform(news['TEXT']) encoder =", "training data clf.fit(x_train, y_train) # Print the accuracy print(\"Accuracy: {}\".format(clf.score(x_test, y_test))) x_test_clv_pred =", "= [normalize_text(s) for s in news['TITLE']] news['CATEGORY'].unique() # pull the data into vectors", "np # linear algebra import pandas as pd # data processing, CSV file", "= CountVectorizer() x = vectorizer.fit_transform(news['TEXT']) encoder = LabelEncoder() y = encoder.fit_transform(news['CATEGORY']) # split", "= s.lower() # remove punctuation that is not word-internal (e.g., hyphens, apostrophes) for", "x_test, y_train, y_test = train_test_split(x, y, test_size=0.2) # Instantiate the classifier: clf clf", "a given title def predict_cat(title): title=title.lower() cat_names = {'b' : 'business', 't' :", "y_train, y_test = train_test_split(x, y, test_size=0.2) # Instantiate the classifier: clf clf =", "import CountVectorizer # function for encoding categories from sklearn.preprocessing import LabelEncoder from sklearn.metrics", ": 'science and technology', 'e' : 'entertainment', 'm' : 'health'} clf_pred = clf.predict(vectorizer.transform([title]))", "documents into counts from sklearn.feature_extraction.text import CountVectorizer # function for encoding categories from", "string.punctuation: s = s.replace(ch, \" \") # make sure we didn't introduce any", "from sklearn.model_selection import train_test_split # function for 
transforming documents into counts from sklearn.feature_extraction.text", "sshekhar \"\"\" # get some libraries that will be useful import re import", "\") # make sure we didn't introduce any double spaces s = re.sub('\\s+','", "return cat_names[encoder.inverse_transform(clf_pred)[0]] news = pd.read_csv(\"data/uci-news-aggregator.csv\") # let's take a look at our data", "y = encoder.fit_transform(news['CATEGORY']) # split into train and test sets x_train, x_test, y_train,", "from sklearn.ensemble import RandomForestClassifier # function to split the data for cross-validation from", "s.lower() # remove punctuation that is not word-internal (e.g., hyphens, apostrophes) for ch", "s = re.sub('\\s+',' ',s) s = re.sub(\"[0-9]+\", \"||DIG||\",s) s = re.sub(' +',' ',", "cat_names = {'b' : 'business', 't' : 'science and technology', 'e' : 'entertainment',", "double spaces s = re.sub('\\s+',' ',s) s = re.sub(\"[0-9]+\", \"||DIG||\",s) s = re.sub('", "-*- \"\"\" Created on Fri Aug 10 17:37:56 2018 @author: sshekhar \"\"\" #", "s in news['TITLE']] news['CATEGORY'].unique() # pull the data into vectors vectorizer = CountVectorizer()", "remove punctuation that is not word-internal (e.g., hyphens, apostrophes) for ch in string.punctuation:", "(e.g. 
pd.read_csv) from sklearn.ensemble import RandomForestClassifier # function to split the data for", "spaces s = re.sub('\\s+',' ',s) s = re.sub(\"[0-9]+\", \"||DIG||\",s) s = re.sub(' +','", "title=title.lower() cat_names = {'b' : 'business', 't' : 'science and technology', 'e' :", "categories from sklearn.preprocessing import LabelEncoder from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report", "print(\"Accuracy: {}\".format(clf.score(x_test, y_test))) x_test_clv_pred = clf.predict(x_test) confusion_matrix(y_test, x_test_clv_pred) print(classification_report(y_test, x_test_clv_pred, target_names=encoder.classes_)) randomtitle=\"vehicular pollution", "on Fri Aug 10 17:37:56 2018 @author: sshekhar \"\"\" # get some libraries", "# linear algebra import pandas as pd # data processing, CSV file I/O", "return s #Function to predict the category for a given title def predict_cat(title):", "'health'} clf_pred = clf.predict(vectorizer.transform([title])) return cat_names[encoder.inverse_transform(clf_pred)[0]] news = pd.read_csv(\"data/uci-news-aggregator.csv\") # let's take a", "'business', 't' : 'science and technology', 'e' : 'entertainment', 'm' : 'health'} clf_pred", "coding: utf-8 -*- \"\"\" Created on Fri Aug 10 17:37:56 2018 @author: sshekhar", "confusion_matrix from sklearn.metrics import classification_report from sklearn.model_selection import cross_val_score #Function to normalize the", "into counts from sklearn.feature_extraction.text import CountVectorizer # function for encoding categories from sklearn.preprocessing", "for cross-validation from sklearn.model_selection import train_test_split # function for transforming documents into counts", "vectors vectorizer = CountVectorizer() x = vectorizer.fit_transform(news['TEXT']) encoder = LabelEncoder() y = encoder.fit_transform(news['CATEGORY'])", "10 17:37:56 2018 @author: sshekhar \"\"\" # get some libraries that will be", "#Function to predict the category for a given 
title def predict_cat(title): title=title.lower() cat_names", "x_test_clv_pred) print(classification_report(y_test, x_test_clv_pred, target_names=encoder.classes_)) randomtitle=\"vehicular pollution - a big hazard for children\" print", "s.replace(ch, \" \") # make sure we didn't introduce any double spaces s", "# make sure we didn't introduce any double spaces s = re.sub('\\s+',' ',s)", "', s) return s #Function to predict the category for a given title", "string import numpy as np # linear algebra import pandas as pd #", "clf = RandomForestClassifier() # Fit the classifier to the training data clf.fit(x_train, y_train)", "into vectors vectorizer = CountVectorizer() x = vectorizer.fit_transform(news['TEXT']) encoder = LabelEncoder() y =", "function to split the data for cross-validation from sklearn.model_selection import train_test_split # function", "vectorizer = CountVectorizer() x = vectorizer.fit_transform(news['TEXT']) encoder = LabelEncoder() y = encoder.fit_transform(news['CATEGORY']) #", "= pd.read_csv(\"data/uci-news-aggregator.csv\") # let's take a look at our data #Normalize the title", "let's take a look at our data #Normalize the title news['TEXT'] = [normalize_text(s)", "clf_pred = clf.predict(vectorizer.transform([title])) return cat_names[encoder.inverse_transform(clf_pred)[0]] news = pd.read_csv(\"data/uci-news-aggregator.csv\") # let's take a look", "as pd # data processing, CSV file I/O (e.g. 
pd.read_csv) from sklearn.ensemble import", "punctuation that is not word-internal (e.g., hyphens, apostrophes) for ch in string.punctuation: s", "# Print the accuracy print(\"Accuracy: {}\".format(clf.score(x_test, y_test))) x_test_clv_pred = clf.predict(x_test) confusion_matrix(y_test, x_test_clv_pred) print(classification_report(y_test,", "as np # linear algebra import pandas as pd # data processing, CSV", "accuracy print(\"Accuracy: {}\".format(clf.score(x_test, y_test))) x_test_clv_pred = clf.predict(x_test) confusion_matrix(y_test, x_test_clv_pred) print(classification_report(y_test, x_test_clv_pred, target_names=encoder.classes_)) randomtitle=\"vehicular", "print(classification_report(y_test, x_test_clv_pred, target_names=encoder.classes_)) randomtitle=\"vehicular pollution - a big hazard for children\" print predict_cat(randomtitle)", "to the training data clf.fit(x_train, y_train) # Print the accuracy print(\"Accuracy: {}\".format(clf.score(x_test, y_test)))", "+',' ', s) return s #Function to predict the category for a given", "from sklearn.model_selection import cross_val_score #Function to normalize the text def normalize_text(s): #lower-case the", "title def predict_cat(title): title=title.lower() cat_names = {'b' : 'business', 't' : 'science and", "sklearn.preprocessing import LabelEncoder from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report from sklearn.model_selection", "Print the accuracy print(\"Accuracy: {}\".format(clf.score(x_test, y_test))) x_test_clv_pred = clf.predict(x_test) confusion_matrix(y_test, x_test_clv_pred) print(classification_report(y_test, x_test_clv_pred,", "hyphens, apostrophes) for ch in string.punctuation: s = s.replace(ch, \" \") # make", "test_size=0.2) # Instantiate the classifier: clf clf = RandomForestClassifier() # Fit the classifier", "data into vectors vectorizer = CountVectorizer() x = vectorizer.fit_transform(news['TEXT']) encoder = LabelEncoder() y", "predict_cat(title): 
title=title.lower() cat_names = {'b' : 'business', 't' : 'science and technology', 'e'", "pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) from sklearn.ensemble", "import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) from", "category for a given title def predict_cat(title): title=title.lower() cat_names = {'b' : 'business',", "is not word-internal (e.g., hyphens, apostrophes) for ch in string.punctuation: s = s.replace(ch,", "encoder = LabelEncoder() y = encoder.fit_transform(news['CATEGORY']) # split into train and test sets", "get some libraries that will be useful import re import string import numpy", "numpy as np # linear algebra import pandas as pd # data processing,", "= re.sub(' +',' ', s) return s #Function to predict the category for", "the classifier to the training data clf.fit(x_train, y_train) # Print the accuracy print(\"Accuracy:", "# -*- coding: utf-8 -*- \"\"\" Created on Fri Aug 10 17:37:56 2018", "@author: sshekhar \"\"\" # get some libraries that will be useful import re", "linear algebra import pandas as pd # data processing, CSV file I/O (e.g.", "import LabelEncoder from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report from sklearn.model_selection import", "'e' : 'entertainment', 'm' : 'health'} clf_pred = clf.predict(vectorizer.transform([title])) return cat_names[encoder.inverse_transform(clf_pred)[0]] news =", "predict the category for a given title def predict_cat(title): title=title.lower() cat_names = {'b'", "Aug 10 17:37:56 2018 @author: sshekhar \"\"\" # get some libraries that will", "'t' : 'science and technology', 'e' : 'entertainment', 'm' : 'health'} clf_pred =", "s) return s #Function to predict the category for a given title def", "sklearn.feature_extraction.text import CountVectorizer # function for encoding categories from sklearn.preprocessing import LabelEncoder from", "= re.sub(\"[0-9]+\", \"||DIG||\",s) s = re.sub(' +',' ', s) return s #Function 
to", "the text s = s.lower() # remove punctuation that is not word-internal (e.g.,", "algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)", "= encoder.fit_transform(news['CATEGORY']) # split into train and test sets x_train, x_test, y_train, y_test", "take a look at our data #Normalize the title news['TEXT'] = [normalize_text(s) for", "from sklearn.feature_extraction.text import CountVectorizer # function for encoding categories from sklearn.preprocessing import LabelEncoder", "I/O (e.g. pd.read_csv) from sklearn.ensemble import RandomForestClassifier # function to split the data", "news['TITLE']] news['CATEGORY'].unique() # pull the data into vectors vectorizer = CountVectorizer() x =", "import numpy as np # linear algebra import pandas as pd # data", "import string import numpy as np # linear algebra import pandas as pd" ]
[ "in csv-Dateien with open(datasets+'/reduced_labels_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_rl_csv: train_rl = csv.writer(train_rl_csv, delimiter", "random_state=42) # In[31]: # speichert train-, validation- und testset von disciplines_only in csv-Dateien", "[\"Épistémologie & histoire de la géographie_T\", \"Espace_T\", \"société et territoire_T\", \"Géographie : politique_T\",", "{k.lower(): [i.lower() for i in v] for k, v in themes_dic.items()} print(\"THEMES:\") for", "key, value in sorted(vocab.items()): v.write(\"%s : %s\\n\" % (key, value)) # In[4]: print(identset[1000])", "= get_label_dic(themes_only) print(\"Auf höchste Hierarchieebene reduzierte Themen:\", len(themes_only_dic)) # schreibt die Anzahl der", "Label in txt-Datei # (disciplines_only nicht reduziert) with open(folder+'/blogposts_per_discipline.txt',\"w\", encoding=\"utf8\") as do: for", "in zip(z_train_al, y_train_al, X_train_al): labellist = \", \".join(labels) textlist = \" \".join(texts) train_al.writerow([ident,", "tmp_all_labels = [] tmp_themes = [] tmp_disciplines = [] #print(\"\\nlabels in y an", "operations import csv # module for csv output from sklearn.model_selection import train_test_split #", "tmp_label3 = tmp_label3.lower().replace('\"', '').strip().split(\",\") tmp_label3 = [x.strip()+'_t' for x in tmp_label3] tmp_label.extend(tmp_label3) #print(\"Sonderfall:\",", "insgesammt:\", len(disciplines_only_dic)) # schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei #", "= \", \" for label in y: for l in label: if l", "et sociaux_T\", \"Politiques et actions publiques_T\", \"Relations internationales_T\", \"Sciences politiques_T\", \"Sociologie politique_T\"], \"Études", "mehr als 100 Blogbeiträgen) with open(folder+'/blogposts_per_themes_only_reduced.txt',\"w\", encoding=\"utf8\") as tor: for key, value in", "encoding=\"utf8\") as rl: for key, value in sorted(reduced_labels_dic.items()): rl.write(\"%s : %s\\n\" % (key,", "= get_label_dic(reduced_labels) 
print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt:\", len(reduced_labels_dic)) # schreibt die Anzahl", "#tmp_text = document.split(\";\", 4)[3].strip() #print(\"tmp_text:\", tmp_text) tmp_label1 = tmp_label1.lower().replace('\"', '').strip().split(\",\") tmp_label1 = [x.strip()+'_d'", "%s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) # In[17]: # reduced_labels", "newline='', encoding=\"utf-8\") as test_rl_csv: test_rl = csv.writer(test_rl_csv, delimiter = \";\") test_rl.writerow([\"filename\", \"classes\", \"text\"])", "identset, disciplines_only_reduced, textset = delete_blog(identset, disciplines_only_reduced, textset) # In[30]: # splittet all_labels in", "module for visualization from wordcloud import WordCloud # module for wordclouds # In[2]:", "In[30]: # splittet all_labels in train- und testset # x = text, y", "= norm_text.split() vocab = {} for word in tokens: if word in vocab:", "weniger als 100 Blogbeiträge zugeordnet sind\"\"\" small_classes = [] reduced_labelset = [] for", "= [] tmp_themes = [] tmp_disciplines = [] #print(\"\\nlabels in y an der", "reduzierte Klassen insgesammt:\", len(disciplines_only_dic)) # schreibt die Anzahl der Blogbeiträge pro Label in", "as v: for key, value in sorted(vocab.items()): v.write(\"%s : %s\\n\" % (key, value))", "weniger als 100 Texten:\", len(small_classes_themes_only)) print(\"Auf höchste Hierarchieebene reduzierte Themen (reduziert):\", len(themes_only_reduced_dic)) #", "textliste=str(all_labels_dic.keys()) textliste=textliste.replace(',', '').replace(\"'\", \"\").replace('\"', '').replace(\"l'\", '').split(' ') blacklist = ['et', 'du', 'études', 'de',", "y_train_al, X_train_al): labellist = \", \".join(labels) textlist = \" \".join(texts) train_al.writerow([ident, labellist, textlist])", "'w', newline='', encoding=\"utf-8\") as test_al_csv: test_al = csv.writer(test_al_csv, delimiter = \";\") test_al.writerow([\"filename\", \"classes\",", "\"Démographie_T\", \"Étude des genres_T\", 
\"Sociologie de la consommation_T\", \"Sociologie de la culture_T\", \"Sociologie", "urbaines_T\"], \"Europe_T\" : [\"Balkans_T\", \"Belgique_T\", \"Europe centrale et orientale_T\", \"Mondes russes et soviétiques_T\",", "= 0.20, stratify=disciplines_only_reduced, random_state=42) # In[31]: # speichert train-, validation- und testset von", "disciplines = [] for i, elements in enumerate(y): tmp_all_labels = [] tmp_themes =", "stratify=all_labels_reduced, random_state=42) # In[11]: # speichert train- und testset von all_labels in csv-Dateien", "= WordCloud(width=680, height=680, margin=0, background_color=\"white\").generate(text) # Display the generated image: plt.imshow(wordcloud, interpolation='bilinear') plt.axis(\"off\")", "# löscht kleine Klassen (<100) def remove_small_classes(labelset, label_dic): \"\"\"Löscht die Klassen, denen weniger", "word in vocab: vocab[word] += 1 else: vocab[word] = 1 \"\"\" # read", "Blogbeiträge zugeordnet sind\"\"\" small_classes = [] reduced_labelset = [] for key, value in", "textset) # In[30]: # splittet all_labels in train- und testset # x =", "\"Littératures_T\"], \"Moyen Âge_T\" : [\"Bas Moyen Âge_T\", \"Haut Moyen Âge_T\"], \"Océanie_T\" : [\"Océanie_T\"],", "texts]) # In[24]: # splittet all_labels in train- und testset # x =", "vocab # split identnumber, nace-code-list and corporate purpose and save in lists def", ": [\"Océanie_T\"], \"Pensée_T\" : [\"Histoire intellectuelle_T\", \"Philosophie_T\", \"Sciences cognitives_T\"], \"Préhistoire et antiquité_T\" :", "ident, labels, texts in zip(identset, reduced_labels_reduced, textset): labellist = \", \".join(labels) textlist =", "Hypotheses-Blogposts: filename;classes;text if not os.path.exists(datasets): os.makedirs(datasets) class MyCorpus(object): \"\"\" Preprocessing des Korpus \"\"\"", "'»', '≥', '<', '>', '^']: norm_text = norm_text.replace(char, ' ') tokens = norm_text.split()", "dow.writerow([ident, labels, texts]) # In[29]: # Für den Blog des Archivs der Erzdiözese", 
"'!', '?', '…','·', '·', '\"', '„', '“', '”', \"´\", \"`\", \"’\", \"‘\", \"‚\",\"'\",", "do.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) # In[27]:", "\"Ethnologie_T\" : [\"Anthropologie culturelle_T\", \"Anthropologie politique_T\", \"Anthropologie religieuse_T\", \"Anthropologie sociale_T\"], \"Études des sciences_T\"", "= remove_small_classes(reduced_labels, reduced_labels_dic) reduced_labels_reduced_dic = get_label_dic(reduced_labels_reduced) print(\"Klassen mit weniger als 100 Texten:\", len(small_classes_reduced_labels))", "\"Études urbaines_T\" : [\"Études urbaines_T\"], \"Europe_T\" : [\"Balkans_T\", \"Belgique_T\", \"Europe centrale et orientale_T\",", "['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.', ',', ';',", "Preprocessing des Korpus \"\"\" # file: input data # def __init__(self, file, x,", "tokens: #if token in stopWords: if token in stopwords: continue else: words.append(token)\"\"\" #return", "et territoire_T\", \"Géographie : politique_T\", \"culture et représentation_T\", \"Géographie appliquée et aménagement_T\", \"Géographie", "vocabulary # In[3]: # get corpus from disk identset, labelset, textset, vocab =", "des sciences_T\", \"Sociologie des sciences_T\"], \"Études du politique_T\" : [\"Guerres_T\", \"conflits_T\", \"violence_T\", \"Génocides", "santé_T\", \"Sociologie du travail_T\", \"Sociologie économique_T\", \"Sociologie urbaine_T\", \"Sport et loisirs_T\"]} themes_dic =", "Text: - transformiert alles in Kleinschrift - löscht Satz- und Sonderzeichen\"\"\" norm_text =", "et développement_D\" : [\"Relations internationales_D\", \"Sciences politiques_D\", \"Administration publique_D\"], \"Arts et humanités_D\" :", "%s\" % (key, value)) print(len(list(themes_dic))) # In[13]: disciplines_dic = {\"administration publique et développement_D\"", "\"Sciences de l'information et bibliothéconomie_D\"], \"Droit_D\" : [\"Criminologie_D\", \"Droit_D\"], \"Économie_D\" : [\"Commerce et", "for ident, labels, texts in 
zip(z_test_to, y_test_to, X_test_to): labellist = \", \".join(labels) textlist", "\"Politiques et actions publiques_T\", \"Relations internationales_T\", \"Sciences politiques_T\", \"Sociologie politique_T\"], \"Études urbaines_T\" :", ": [\"Relations internationales_D\", \"Sciences politiques_D\", \"Administration publique_D\"], \"Sociologie et anthropologie_D\" : [\"Anthropologie_D\", \"Études", "themes_only dic reduced (<100) themes_only_reduced, small_classes_themes_only = remove_small_classes(themes_only, themes_only_dic) themes_only_reduced_dic = get_label_dic(themes_only_reduced) print(\"Klassen", "\"Sociologie économique_T\", \"Sociologie urbaine_T\", \"Sport et loisirs_T\"]} themes_dic = {k.lower(): [i.lower() for i", "from sklearn.model_selection import train_test_split # module to split data into train and test", "encoding=\"utf-8\") as test_al_csv: test_al = csv.writer(test_al_csv, delimiter = \";\") test_al.writerow([\"filename\", \"classes\", \"text\"]) for", "textset, vocab = MyCorpus(file) # save vocabulary to file with open(folder+'/blogs_vocabulary.txt',\"w\", encoding=\"utf8\") as", "# disciplines_only dic reduced (<100) disciplines_only_reduced, small_classes_disciplines_only = remove_small_classes(disciplines_only, disciplines_only_dic) disciplines_only_reduced_dic = get_label_dic(disciplines_only_reduced)", "mit mehr als 100 Blogbeiträgen) with open(folder+'/blogposts_per_disciplines_only_reduced.txt',\"w\", encoding=\"utf8\") as dor: for key, value", "# In[32]: # Histogramm: Blogs pro all_labels (besser in excel visualisieren) height =", "the y-axis plt.yticks(y_pos, bars) # Show graphic plt.show() # Save as SVG: plt.savefig(pictures+'/Blogs_all_labels_histogram.svg',", "(key, value)) print(\"%s: %s\" % (key, value)) # In[28]: # schreibt filename, classes,", "de corpus_T\", \"enquêtes_T\", \"archives_T\", \"Archéologie_T\", \"Cartographie_T\", \"imagerie_T\", \"SIG_T\", \"Digital humanities_T\", \"Épistémologie_T\", \"Historiographie_T\", 
\"Méthodes", ": [\"Linguistique appliquée_D\", \"Théorie du langage et linguistique_D\", \"Langue et linguistique_D\"], \"Littérature_D\" :", "in texts: #print(\"\\n text in iter:\", text) yield text # preprocessing #========================== #", "value in sorted(disciplines_only_reduced_dic.items()): dor.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key,", "'5', '6', '7', '8', '9', '.', ',', ';', ':', '!', '?', '…','·', '·',", "= \" \".join(texts) train_do.writerow([ident, labellist, textlist]) with open(datasets+'/disciplines_only_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_do_csv:", "100 Blogbeiträgen) with open(folder+'/blogposts_per_all_labels_reduced.txt',\"w\", encoding=\"utf8\") as bplr: for key, value in sorted(all_labels_reduced_dic.items()): bplr.write(\"%s", ": [\"Éducation et sciences de l'éducation_D\", \"Éducation : disciplines scientifiques_D\", \"Éducation spécialisée_D\"], \"Études", "def delete_blog(identset, labelset, textset): idents = [] labels = [] texts = []", "= [] for element in elements: if element in small_classes: continue else: tmp_labels.append(element)", "#print(\"tmp_text:\", tmp_text) tmp_label1 = tmp_label1.lower().replace('\"', '').strip().split(\",\") tmp_label1 = [x.strip()+'_d' for x in tmp_label1]", "\", \".join(labels) textlist = \" \".join(texts) test_to.writerow([ident, labellist, textlist]) # # Erstellung des", "#print(\"tmp_label1:\", tmp_label1) tmp_label2 = document.split(\";\", 3)[2].strip() #print(\"tmp_label2:\", tmp_label2) tmp_text, vocab = self.normalize_text(document.split(\";\", 4)[3])", "themes_only # In[21]: # themes_only dic themes_only_dic = get_label_dic(themes_only) print(\"Auf höchste Hierarchieebene reduzierte", "des sciences_T\", \"Philosophie des sciences_T\", \"Sociologie des sciences_T\"], \"Études du politique_T\" : [\"Guerres_T\",", "for key, value in disciplines_dic.items(): print(\"%s: %s\" % (key, value)) print(len(list(disciplines_dic))) # In[14]:", "classes, text 
von all_labels in csv-Datei with open(datasets+'/de_labeled_corpus_all_labels.csv', 'w', newline='', encoding=\"utf-8\") as al_csv:", "rl.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) # In[17]:", "key, value in sorted(disciplines_only_dic.items()): do.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" %", "'../Visualisierungen' if not os.path.exists(pictures): os.makedirs(pictures) # In[32]: # Histogramm: Blogs pro all_labels (besser", "culturelle_T\", \"Histoire de l'Art_T\", \"Identités culturelles_T\", \"Patrimoine_T\"], \"Sociologie_T\" : [\"Âges de la vie_T\",", "as one item of a list (one document = identnumber, nace-code-list + text)", "# def __init__(self, file, x, y): def __init__(self, file): self.file = file #", "\".join(texts) test_al.writerow([ident, labellist, textlist]) # # Erstellung des Korpus reduced_labels # In[12]: #", "test_rl.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_test_rl, y_test_rl, X_test_rl): labellist =", "print(len(identset)) print(len(labelset)) print(len(textset)) # In[5]: def get_label_dic(y): \"\"\"Erstellt ein dictionary zur Anzahl der", "'=', '#', '«', '»', '≥', '<', '>', '^']: norm_text = norm_text.replace(char, ' ')", "\"Sociologie_D\", \"Études féministes_D\"], \"Travail social et politique sociale_D\" : [\"Études des relations interethniques_D\",", "culturelle_T\", \"Anthropologie politique_T\", \"Anthropologie religieuse_T\", \"Anthropologie sociale_T\"], \"Asie_T\" : [\"Asie centrale_T\", \"Asie du", "politique_T\", \"Anthropologie religieuse_T\", \"Anthropologie sociale_T\"], \"Études des sciences_T\" : [\"Histoire des sciences_T\", \"Philosophie", "Blogs pro all_labels (besser in excel visualisieren) height = list(all_labels_dic.values()) bars = list(all_labels_dic.keys())", "encoding=\"utf-8\") as rl_csv: rlw = csv.writer(rl_csv, delimiter = \";\") rlw.writerow([\"filename\", \"classes\", \"text\"]) for", "zugehörigen Blogbeiträge)\"\"\" labelcount_dic = 
{} #tmp_label = \", \" for label in y:", "de l'Histoire_T\", \"Vie de la recherche_T\"], \"Époque contemporaine_T\" : [\"Prospectives_T\", \"XIXe siècle_T\", \"XXe", "value)) # In[9]: # schreibt filename, classes, text von all_labels in csv-Datei with", "[] for element in elements: if element in small_classes: continue else: tmp_labels.append(element) reduced_labelset.append(tmp_labels)", "open(folder+'/german_stopwords_plain.txt', 'r').read() for token in tokens: #if token in stopWords: if token in", "get_label_dic(reduced_labels_reduced) print(\"Klassen mit weniger als 100 Texten:\", len(small_classes_reduced_labels)) print(\"Auf höchste Hierarchieebene reduzierte Klassen", "MyCorpus(object): \"\"\" Preprocessing des Korpus \"\"\" # file: input data # def __init__(self,", "y_train_rl, X_train_rl): labellist = \", \".join(labels) textlist = \" \".join(texts) train_rl.writerow([ident, labellist, textlist])", "= \";\") rlw.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(identset, reduced_labels_reduced, textset):", "in sorted(disciplines_only_dic.items()): do.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value))", "[] labels = [] texts = [] for ident, label, text in zip(identset,", "# In[19]: # splittet all_labels in train- und testset # x = text,", ": [\"Éthique_D\", \"Politique et services de santé_D\", \"Sciences et pratiques des soins_D\", \"Biomédecine_D\",", "else: labelcount_dic[l] = 1 return labelcount_dic # In[6]: # löscht kleine Klassen (<100)", "\"text\"]) for ident, labels, texts in zip(z_train_rl, y_train_rl, X_train_rl): labellist = \", \".join(labels)", "\"Éducation spécialisée_D\"], \"Études environnementales_D\" : [\"Études environnementales_D\", \"Géographie_D\", \"Études urbaines_D\"], \"géographie et développement_D\"", "\"Économie_D\" : [\"Commerce et affaires_D\", \"Économie_D\", \"Finance_D\"], \"Éducation_D\" : [\"Éducation et sciences de", "religieuse_T\", \"Anthropologie sociale_T\"], 
\"Asie_T\" : [\"Asie centrale_T\", \"Asie du Sud-Est_T\", \"Extrême Orient_T\", \"Chine_T\",", "disciplines_only in csv-Datei with open(datasets+'/de_labeled_corpus_disciplines_only.csv', 'w', newline='', encoding=\"utf-8\") as do_csv: dow = csv.writer(do_csv,", "2)[1].strip() #print(\"tmp_label1:\", tmp_label1) tmp_label2 = document.split(\";\", 3)[2].strip() #print(\"tmp_label2:\", tmp_label2) tmp_text, vocab = self.normalize_text(document.split(\";\",", "den Blog des Archivs der Erzdiözese Salzburg (aes) wurden keine Disziplinen gewählt, #", "= get_label_dic(labelset) print(\"Klassen insgesammt:\", len(all_labels_dic)) #print(all_labels_dic) count = 0 classes = list(all_labels_dic) #", "for element in elements: if element in small_classes: continue else: tmp_labels.append(element) reduced_labelset.append(tmp_labels) return", "validation- und testset von themes_only in csv-Dateien with open(datasets+'/themes_only_trainset.csv', 'w', newline='', encoding=\"utf-8\") as", "names on the y-axis plt.yticks(y_pos, bars) # Show graphic plt.show() # Save as", "# schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei # (themes_only reduziert", "\"Études des sciences_T\" : [\"Histoire des sciences_T\", \"Philosophie des sciences_T\", \"Sociologie des sciences_T\"],", "z_test_to = train_test_split(textset, themes_only_reduced, identset, test_size = 0.20, stratify=themes_only_reduced, random_state=42) # In[25]: #", "__init__(self, file, x, y): def __init__(self, file): self.file = file # memory friendlys", "et humanités_D\" : [\"Architecture_D\", \"Arts_D\", \"Études asiatiques_D\", \"Études anciennes_D\", \"Études culturelles_D\", \"Folklore_D\", \"Humanités", "# In[17]: # reduced_labels reduced dic (<100) reduced_labels_reduced, small_classes_reduced_labels = remove_small_classes(reduced_labels, reduced_labels_dic) reduced_labels_reduced_dic", "#print(\"\\nnew labelset:\", labels) return labels, themes, disciplines reduced_labels, themes_only, disciplines_only = 
reduce_labels(labelset) #", "<reponame>MaLuHart/Blogpost-Classification # coding: utf-8 # # Preprocessing der Texte # # Autorin: <NAME>", "de l'information et bibliothéconomie_D\"], \"Sciences politiques_D\" : [\"Relations internationales_D\", \"Sciences politiques_D\", \"Administration publique_D\"],", "textlist = \" \".join(texts) rlw.writerow([ident, labellist, textlist]) # In[19]: # splittet all_labels in", "test_to.writerow([ident, labellist, textlist]) # # Erstellung des Korpus disciplines_only # In[26]: # disciplines_only", "',', ';', ':', '!', '?', '…','·', '·', '\"', '„', '“', '”', \"´\", \"`\",", "pratiques des soins_D\", \"Biomédecine_D\", \"Toxicomanie_D\"], \"Sciences de l'information et de la communication_D\" :", "vocab return tokens, vocab # split identnumber, nace-code-list and corporate purpose and save", "Orient_T\", \"Chine_T\", \"Japon_T\", \"Monde indien_T\", \"Monde persan_T\", \"Moyen-Orient_T\", \"Proche-Orient_T\"], \"Droit_T\" : [\"Histoire du", "pictures = '../Visualisierungen' if not os.path.exists(pictures): os.makedirs(pictures) # In[32]: # Histogramm: Blogs pro", "mehr als 100 Blogbeiträgen) with open(folder+'/blogposts_per_all_labels_reduced.txt',\"w\", encoding=\"utf8\") as bplr: for key, value in", "memory friendlys because doesn't load the corpus into memory! 
def __iter__(self): openfile =", "\"\"\"Splittet jede Zeile des eingelesenen Dokuments in die drei Listen 1) filnames 2)", "words.append(token)\"\"\" #return words, vocab #return norm_text, vocab return tokens, vocab # split identnumber,", "In[27]: # disciplines_only dic reduced (<100) disciplines_only_reduced, small_classes_disciplines_only = remove_small_classes(disciplines_only, disciplines_only_dic) disciplines_only_reduced_dic =", "document in enumerate(documents[1:]): tmp_ident = document.split(\";\", 1)[0] #print(\"tmp_ident:\", tmp_ident) tmp_label = [] if", "% (key, value)) # In[22]: # themes_only dic reduced (<100) themes_only_reduced, small_classes_themes_only =", "in labelset: tmp_labels = [] for element in elements: if element in small_classes:", "= get_label_dic(reduced_labels_reduced) print(\"Klassen mit weniger als 100 Texten:\", len(small_classes_reduced_labels)) print(\"Auf höchste Hierarchieebene reduzierte", "\"Pays baltes et scandinaves_T\", \"Péninsule ibérique_T\", \"Suisse_T\"], \"Géographie_T\" : [\"Épistémologie & histoire de", "norm_text.split() vocab = {} for word in tokens: if word in vocab: vocab[word]", "in blacklist: textliste.remove(element) text = str(textliste).replace(\"'\", \"\") # Create the wordcloud object wordcloud", "train_al_csv: train_al = csv.writer(train_al_csv, delimiter = \";\") train_al.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels,", "= \" \".join(texts) train_al.writerow([ident, labellist, textlist]) with open(datasets+'/all_labels_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_al_csv:", "for key, value in vocab.items(): if key in vocabulary: vocabulary[key] += value else:", "elements: #print(\"\\nLabel:\", element) # themes for key, value in themes_dic.items(): if element ==", "with open(datasets+'/reduced_labels_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_rl_csv: train_rl = csv.writer(train_rl_csv, delimiter = \";\")", "Disziplinen gewählt, # dementsprechend wird 
dieser Blog aus disciplines_only entfernt def delete_blog(identset, labelset,", "value in themes_dic.items(): if element == key: tmp_all_labels.append(element) tmp_themes.append(element) #print(\"\\nTheme key:\", element) elif", "pro Label in txt-Datei # (themes_only reduziert auf Labels mit mehr als 100", "la santé publique_D\" : [\"Éthique_D\", \"Politique et services de santé_D\", \"Sciences et pratiques", "newline='', encoding=\"utf-8\") as rl_csv: rlw = csv.writer(rl_csv, delimiter = \";\") rlw.writerow([\"filename\", \"classes\", \"text\"])", "= [] text = [] vocabulary = {} # first row is headline", "= document.split(\";\", 3)[2] #print(\"tmp_text:\", tmp_text) else: tmp_label1 = document.split(\";\", 2)[1].strip() #print(\"tmp_label1:\", tmp_label1) tmp_label2", "reduced dic (<100) reduced_labels_reduced, small_classes_reduced_labels = remove_small_classes(reduced_labels, reduced_labels_dic) reduced_labels_reduced_dic = get_label_dic(reduced_labels_reduced) print(\"Klassen mit", "small_classes: continue else: tmp_labels.append(element) reduced_labelset.append(tmp_labels) return reduced_labelset, small_classes # # Erstellung des Korpus", "0.20, stratify=themes_only_reduced, random_state=42) # In[25]: # speichert train-, validation- und testset von themes_only", "encoding=\"utf8\") as dor: for key, value in sorted(disciplines_only_reduced_dic.items()): dor.write(\"%s : %s\\n\" % (key,", "# In[12]: # themes themes_dic = {\"Afrique_T\" : [\"Afrique du nord_T\", \"Algérie_T\", \"Afrique", "i in v] for k, v in disciplines_dic.items()} print(\"DISCIPLINES:\") for key, value in", "= \";\") test_rl.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_test_rl, y_test_rl, X_test_rl):", "(tmp_label1 + tmp_label2) #print(tmp_label) tmp_label = [x.strip() for x in tmp_label] ident.append(tmp_ident) label.append(tmp_label)", "text von themes_only in csv-Datei with open(datasets+'/de_labeled_corpus_themes_only.csv', 'w', newline='', 
encoding=\"utf-8\") as to_csv: tow", "\"Belgique_T\", \"Europe centrale et orientale_T\", \"Mondes russes et soviétiques_T\", \"France_T\", \"Îles britanniques_T\", \"Italie_T\",", "print(\"%s: %s\" % (key, value)) # In[22]: # themes_only dic reduced (<100) themes_only_reduced,", "\"Administration publique_D\"], \"Sociologie et anthropologie_D\" : [\"Anthropologie_D\", \"Études régionales_D\", \"Sociologie_D\", \"Études féministes_D\"], \"Travail", "as do_csv: dow = csv.writer(do_csv, delimiter = \";\") dow.writerow([\"filename\", \"classes\", \"text\"]) for ident,", "for i in v] for k, v in disciplines_dic.items()} print(\"DISCIPLINES:\") for key, value", "reduced_labels_dic) reduced_labels_reduced_dic = get_label_dic(reduced_labels_reduced) print(\"Klassen mit weniger als 100 Texten:\", len(small_classes_reduced_labels)) print(\"Auf höchste", "in csv-Datei with open(datasets+'/de_labeled_corpus_reduced_labels.csv', 'w', newline='', encoding=\"utf-8\") as rl_csv: rlw = csv.writer(rl_csv, delimiter", "newline='', encoding=\"utf-8\") as train_rl_csv: train_rl = csv.writer(train_rl_csv, delimiter = \";\") train_rl.writerow([\"filename\", \"classes\", \"text\"])", "labels, texts in zip(z_test_al, y_test_al, X_test_al): labellist = \", \".join(labels) textlist = \"", "for i in v] for k, v in themes_dic.items()} print(\"THEMES:\") for key, value", "X_test_to): labellist = \", \".join(labels) textlist = \" \".join(texts) test_to.writerow([ident, labellist, textlist]) #", "(aes) wurden keine Disziplinen gewählt, # dementsprechend wird dieser Blog aus disciplines_only entfernt", "in textliste: if element in blacklist: textliste.remove(element) text = str(textliste).replace(\"'\", \"\") # Create", "vocabulary: vocabulary[key] += value else: vocabulary[key] = value return ident, label, text, vocabulary", "\"Transports_D\", \"Management et administration_D\"], \"Pluridisciplinarité_D\" : [\"Sciences sociales interdisciplinaires_D\"], \"Psychiatrie_D\" : [\"Psychiatrie_D\"], 
\"Psychologie_D\"", "= \", \".join(labels) textlist = \" \".join(texts) train_rl.writerow([ident, labellist, textlist]) with open(datasets+'/reduced_labels_testset.csv', 'w',", "= [x.strip() for x in tmp_label] ident.append(tmp_ident) label.append(tmp_label) text.append(tmp_text) for key, value in", "\"Identités culturelles_T\", \"Patrimoine_T\"], \"Sociologie_T\" : [\"Âges de la vie_T\", \"Criminologie_T\", \"Démographie_T\", \"Étude des", "Blogbeiträge pro Label in txt-Datei # (all_labels reduziert auf Labels mit mehr als", "pro Label in txt-Datei # (all_labels reduziert auf Labels mit mehr als 100", "identnumber, nace-code-list and corporate purpose and save in lists def split_csv(self, documents): \"\"\"Splittet", "siècle_T\", \"1914-1918_T\", \"1918-1939_T\", \"1939-1945_T\", \"1945-1989_T\", \"1989 à de nos jours_T\", \"XXIe siècle_T\"], \"Époque", ": [\"Histoire des femmes_T\", \"Histoire du travail_T\", \"Histoire économique_T\", \"Histoire industrielle_T\", \"Histoire rurale_T\",", "\"Histoire et sociologie des médias_T\", \"Histoire et sociologie du livre_T\", \"Sciences de l'information_T\"],", "csv-Datei with open(datasets+'/de_labeled_corpus_themes_only.csv', 'w', newline='', encoding=\"utf-8\") as to_csv: tow = csv.writer(to_csv, delimiter =", "ident, labels, texts in zip(identset, themes_only, textset): tow.writerow([ident, labels, texts]) # In[24]: #", "tmp_labels.append(element) reduced_labelset.append(tmp_labels) return reduced_labelset, small_classes # # Erstellung des Korpus all_labels # In[7]:", "In[8]: all_labels_reduced, small_classes_all_labels = remove_small_classes(labelset, all_labels_dic) all_labels_reduced_dic = get_label_dic(all_labels_reduced) print(\"Klassen mit weniger als", "in sorted(all_labels_reduced_dic.items()): bplr.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value))", "2) labels 3) text \"\"\" ident = [] label = [] text =", "dic (<100) reduced_labels_reduced, small_classes_reduced_labels = 
remove_small_classes(reduced_labels, reduced_labels_dic) reduced_labels_reduced_dic = get_label_dic(reduced_labels_reduced) print(\"Klassen mit weniger", "return reduced_labelset, small_classes # # Erstellung des Korpus all_labels # In[7]: # all_labels", "len(disciplines_only_dic)) # schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei # (disciplines_only", "to: for key, value in sorted(themes_only_dic.items()): to.write(\"%s : %s\\n\" % (key, value)) print(\"%s:", "text, y = labels, z = filnames X_train_to, X_test_to, y_train_to, y_test_to, z_train_to, z_test_to", "z = filnames X_train_rl, X_test_rl, y_train_rl, y_test_rl, z_train_rl, z_test_rl = train_test_split(textset, reduced_labels_reduced, identset,", "document as one item of a list (one document = identnumber, nace-code-list +", "x = text, y = labels, z = filnames X_train_do, X_test_do, y_train_do, y_test_do,", "vocab = {} for word in tokens: if word in vocab: vocab[word] +=", "with open(datasets+'/disciplines_only_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_do_csv: test_do = csv.writer(test_do_csv, delimiter = \";\")", "4)[3]) #tmp_text = document.split(\";\", 4)[3].strip() #print(\"tmp_text:\", tmp_text) tmp_label1 = tmp_label1.lower().replace('\"', '').strip().split(\",\") tmp_label1 =", "dictionary zur Anzahl der Blogbeiträge pro Label (Label : Anzahl der zugehörigen Blogbeiträge)\"\"\"", "data folder = '../Preprocessing' datasets = '../Datasets' file = folder+'/de_labeled_corpus.csv' # Hypotheses-Blogposts: filename;classes;text", "%s\" % (key, value)) # In[28]: # schreibt filename, classes, text von disciplines_only", "char in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.',", "#print(\"\\nTheme:\", key) else: (\"Element nicht gefunden:\", element) # discipilnes for key, value in", "= {} #tmp_label = \", \" for label in y: for l in", "= folder+'/de_labeled_corpus.csv' # Hypotheses-Blogposts: filename;classes;text if not os.path.exists(datasets): os.makedirs(datasets) class 
MyCorpus(object): \"\"\" Preprocessing", "\"Psychologie biologique_D\", \"Psychologie clinique_D\", \"Psychologie du développement_D\", \"Psychologie éducative_D\", \"Psychologie expérimentale_D\", \"Psychologie pluridisciplinaire_D\",", "and save in lists def split_csv(self, documents): \"\"\"Splittet jede Zeile des eingelesenen Dokuments", "(key, value)) # In[18]: # schreibt filename, classes, text von reduced_labels in csv-Datei", "(themes_only reduziert auf Labels mit mehr als 100 Blogbeiträgen) with open(folder+'/blogposts_per_themes_only_reduced.txt',\"w\", encoding=\"utf8\") as", "height = list(all_labels_dic.values()) bars = list(all_labels_dic.keys()) y_pos = np.arange(len(bars)) # Create horizontal bars", "auf Labels mit mehr als 100 Blogbeiträgen) with open(folder+'/blogposts_per_disciplines_only_reduced.txt',\"w\", encoding=\"utf8\") as dor: for", "for ident, labels, texts in zip(z_train_to, y_train_to, X_train_to): labellist = \", \".join(labels) textlist", "\".join(texts) train_rl.writerow([ident, labellist, textlist]) with open(datasets+'/reduced_labels_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_rl_csv: test_rl =", "for ident, labels, texts in zip(identset, reduced_labels_reduced, textset): labellist = \", \".join(labels) textlist", "wird dieser Blog aus disciplines_only entfernt def delete_blog(identset, labelset, textset): idents = []", "the wordcloud object wordcloud = WordCloud(width=680, height=680, margin=0, background_color=\"white\").generate(text) # Display the generated", "'1', '2', '3', '4', '5', '6', '7', '8', '9', '.', ',', ';', ':',", "labellist, textlist]) # # Erstellung des Korpus themes_only # In[21]: # themes_only dic", "in small_classes: continue else: tmp_labels.append(element) reduced_labelset.append(tmp_labels) return reduced_labelset, small_classes # # Erstellung des", "\"\"\"Bereinigt den Text: - transformiert alles in Kleinschrift - löscht Satz- und Sonderzeichen\"\"\"", "in elements: if element in 
small_classes: continue else: tmp_labels.append(element) reduced_labelset.append(tmp_labels) return reduced_labelset, small_classes", "open(datasets+'/reduced_labels_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_rl_csv: train_rl = csv.writer(train_rl_csv, delimiter = \";\") train_rl.writerow([\"filename\",", "transformiert alles in Kleinschrift - löscht Satz- und Sonderzeichen\"\"\" norm_text = text.lower() #", "# Erstellung des Korpus disciplines_only # In[26]: # disciplines_only dic disciplines_only_dic = get_label_dic(disciplines_only)", "with open(datasets+'/all_labels_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_al_csv: train_al = csv.writer(train_al_csv, delimiter = \";\")", "all_labels in train- und testset # x = text, y = labels, z", ": [\"Édition électronique_T\", \"Histoire et sociologie de la presse_T\", \"Histoire et sociologie des", "anciennes_D\", \"Études culturelles_D\", \"Folklore_D\", \"Humanités pluridisciplinaires_D\", \"Musique_D\", \"Philosophie_D\", \"Religions_D\"], \"bibliothéconomie_D\" : [\"Communication_D\", \"Sciences", "len(reduced_labels_dic)) # schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei # (reduced_labels", "Anzahl der Blogbeiträge pro Label in txt-Datei # (themes_only nicht reduziert) with open(folder+'/blogposts_per_theme.txt',\"w\",", "# Save as SVG: plt.savefig(pictures+'/Blogs_all_labels_histogram.svg', format='svg') plt.savefig(pictures+'/Blogs_all_labels_histogram.png') # In[33]: # Visualisierung des all_label_dics", ": [\"Balkans_T\", \"Belgique_T\", \"Europe centrale et orientale_T\", \"Mondes russes et soviétiques_T\", \"France_T\", \"Îles", "In[16]: # reduced_labels dic reduced_labels_dic = get_label_dic(reduced_labels) print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt:\",", "Texten:\", len(small_classes_themes_only)) print(\"Auf höchste Hierarchieebene reduzierte Themen (reduziert):\", len(themes_only_reduced_dic)) # schreibt die Anzahl", "# In[20]: # 
speichert train- und testset von all_labels in csv-Dateien with open(datasets+'/reduced_labels_trainset.csv',", "k, v in themes_dic.items()} print(\"THEMES:\") for key, value in themes_dic.items(): print(\"%s: %s\" %", "def normalize_text(self, text): \"\"\"Bereinigt den Text: - transformiert alles in Kleinschrift - löscht", "auf Labels mit mehr als 100 Blogbeiträgen) with open(folder+'/blogposts_per_all_labels_reduced.txt',\"w\", encoding=\"utf8\") as bplr: for", "# split identnumber, nace-code-list and corporate purpose and save in lists def split_csv(self,", "z_train_rl, z_test_rl = train_test_split(textset, reduced_labels_reduced, identset, test_size = 0.20, stratify=reduced_labels_reduced, random_state=42) # In[20]:", "and test sets import matplotlib.pyplot as plt # module for visualization from wordcloud", "\" for label in y: for l in label: if l in labelcount_dic:", "tmp_label3.lower().replace('\"', '').strip().split(\",\") tmp_label3 = [x.strip()+'_t' for x in tmp_label3] tmp_label.extend(tmp_label3) #print(\"Sonderfall:\", tmp_ident, tmp_label)", "\"XIXe siècle_T\", \"XXe siècle_T\", \"1914-1918_T\", \"1918-1939_T\", \"1939-1945_T\", \"1945-1989_T\", \"1989 à de nos jours_T\",", "siècle_T\"], \"Époque moderne_T\" : [\"Révolution française_T\", \"XVIe siècle_T\", \"XVIIe siècle_T\", \"XVIIIe siècle_T\"], \"Ethnologie_T\"", "#print(\"Sonderfall:\", tmp_ident, tmp_label) tmp_text, vocab = self.normalize_text(document.split(\";\", 3)[2]) #tmp_text = document.split(\";\", 3)[2] #print(\"tmp_text:\",", "schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei # (themes_only nicht reduziert)", "#tmp_text = document.split(\";\", 3)[2] #print(\"tmp_text:\", tmp_text) else: tmp_label1 = document.split(\";\", 2)[1].strip() #print(\"tmp_label1:\", tmp_label1)", "textlist]) # # Erstellung des Korpus themes_only # In[21]: # themes_only dic themes_only_dic", "labellist, textlist]) with open(datasets+'/reduced_labels_testset.csv', 'w', newline='', encoding=\"utf-8\") 
as test_rl_csv: test_rl = csv.writer(test_rl_csv, delimiter", "value)) print(\"%s: %s\" % (key, value)) # In[22]: # themes_only dic reduced (<100)", "anthropologie_D\" : [\"Anthropologie_D\", \"Études régionales_D\", \"Sociologie_D\", \"Études féministes_D\"], \"Travail social et politique sociale_D\"", "list(all_labels_dic.values()) bars = list(all_labels_dic.keys()) y_pos = np.arange(len(bars)) # Create horizontal bars plt.barh(y_pos, height)", "remove punctuation and stopwords def normalize_text(self, text): \"\"\"Bereinigt den Text: - transformiert alles", "reduce labels to highest level def reduce_labels(y): \"\"\"Reduziert die Themen und Disziplinen auf", "in txt-Datei # (reduced_labels reduziert auf Labels mit mehr als 100 Blogbeiträgen) with", "# In[16]: # reduced_labels dic reduced_labels_dic = get_label_dic(reduced_labels) print(\"Auf höchste Hierarchieebene reduzierte Klassen", "[\"Guerres_T\", \"conflits_T\", \"violence_T\", \"Génocides et massacres_T\", \"Histoire politique_T\", \"Institutions politiques_T\", \"Mouvements politiques et", "zugeordnet sind\"\"\" small_classes = [] reduced_labelset = [] for key, value in label_dic.items():", "y_train_rl, y_test_rl, z_train_rl, z_test_rl = train_test_split(textset, reduced_labels_reduced, identset, test_size = 0.20, stratify=reduced_labels_reduced, random_state=42)", "Blogbeiträge pro Label in txt-Datei # (reduced_labels nicht reduziert) with open(folder+'/blogposts_per_reduced_labels.txt',\"w\", encoding=\"utf8\") as", "tmp_label1.lower().replace('\"', '').strip().split(\",\") tmp_label1 = [x.strip()+'_d' for x in tmp_label1] tmp_label.extend(tmp_label1) tmp_label2 = tmp_label2.lower().replace('\"',", "key, value in sorted(reduced_labels_reduced_dic.items()): rlr.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" %", "element == key: tmp_all_labels.append(element) tmp_disciplines.append(element) #print(\"\\nDiscipline key:\", element) elif element in value: tmp_all_labels.append(key)", 
"open(datasets+'/de_labeled_corpus_all_labels.csv', 'w', newline='', encoding=\"utf-8\") as al_csv: alw = csv.writer(al_csv, delimiter = \";\") alw.writerow([\"filename\",", "# (themes_only reduziert auf Labels mit mehr als 100 Blogbeiträgen) with open(folder+'/blogposts_per_themes_only_reduced.txt',\"w\", encoding=\"utf8\")", "économique_T\", \"Histoire industrielle_T\", \"Histoire rurale_T\", \"Histoire sociale_T\", \"Histoire urbaine_T\"], \"Information_T\" : [\"Édition électronique_T\",", "disciplines_only, textset): dow.writerow([ident, labels, texts]) # In[29]: # Für den Blog des Archivs", "disciplines_only_dic) disciplines_only_reduced_dic = get_label_dic(disciplines_only_reduced) print(\"Klassen mit weniger als 100 Texten:\", len(small_classes_disciplines_only)) print(\"Auf höchste", "print(\"%s: %s\" % (key, value)) print(len(list(disciplines_dic))) # In[14]: # reduce labels to highest", "(key, value)) print(len(list(disciplines_dic))) # In[14]: # reduce labels to highest level def reduce_labels(y):", "\"Littérature britannique_D\", \"Littérature romane_D\", \"Littérature_D\"], \"Management et administration_D\" : [\"Ergonomie_D\", \"Travail et relations", "MyCorpus(file) # save vocabulary to file with open(folder+'/blogs_vocabulary.txt',\"w\", encoding=\"utf8\") as v: for key,", "tmp_disciplines.append(element) #print(\"\\nDiscipline key:\", element) elif element in value: tmp_all_labels.append(key) tmp_disciplines.append(key) #print(\"\\nDiscipline:\", key) else:", ": [\"Révolution française_T\", \"XVIe siècle_T\", \"XVIIe siècle_T\", \"XVIIIe siècle_T\"], \"Ethnologie_T\" : [\"Anthropologie culturelle_T\",", "identset, test_size = 0.20, stratify=disciplines_only_reduced, random_state=42) # In[31]: # speichert train-, validation- und", "rlw.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(identset, reduced_labels_reduced, textset): labellist =", "csv.writer(test_to_csv, delimiter = \";\") test_to.writerow([\"filename\", 
\"classes\", \"text\"]) for ident, labels, texts in zip(z_test_to,", "\"Anthropologie religieuse_T\", \"Anthropologie sociale_T\"], \"Études des sciences_T\" : [\"Histoire des sciences_T\", \"Philosophie des", "sorted(disciplines_only_reduced_dic.items()): dor.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) #", "= [] for ident, label, text in zip(identset, labelset, textset): if ident.startswith('aes_'): continue", "value in sorted(all_labels_dic.items()): bpl.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key,", "[x.strip()+'_d' for x in tmp_label1] tmp_label.extend(tmp_label1) tmp_label2 = tmp_label2.lower().replace('\"', '').strip().split(\",\") tmp_label2 = [x.strip()+'_t'", "reduced_labels_reduced, identset, test_size = 0.20, stratify=reduced_labels_reduced, random_state=42) # In[20]: # speichert train- und", "= labels, z = filnames X_train_al, X_test_al, y_train_al, y_test_al, z_train_al, z_test_al = train_test_split(textset,", "ident, labels, texts in zip(z_train_rl, y_train_rl, X_train_rl): labellist = \", \".join(labels) textlist =", "travail_T\", \"Histoire économique_T\", \"Histoire industrielle_T\", \"Histoire rurale_T\", \"Histoire sociale_T\", \"Histoire urbaine_T\"], \"Information_T\" :", "tmp_label = [] if re.match(\"aes_\", tmp_ident): # Blog \"aes - <NAME>\" hat nur", "\"SIG_T\", \"Digital humanities_T\", \"Épistémologie_T\", \"Historiographie_T\", \"Méthodes de traitement et de représentation_T\", \"Méthodes qualitatives_T\",", "for ident, label, text in zip(identset, labelset, textset): if ident.startswith('aes_'): continue else: idents.append(ident)", "= '../Datasets' file = folder+'/de_labeled_corpus.csv' # Hypotheses-Blogposts: filename;classes;text if not os.path.exists(datasets): os.makedirs(datasets) class", "\"Géographie : politique_T\", \"culture et représentation_T\", \"Géographie appliquée et aménagement_T\", \"Géographie rurale_T\", \"Géographie", "testset # x = text, y = labels, z = filnames 
X_train_do, X_test_do,", "= 1 \"\"\" # read stopwords words = [] stopwords = open(folder+'/german_stopwords_plain.txt', 'r').read()", "disciplines_only_reduced, small_classes_disciplines_only = remove_small_classes(disciplines_only, disciplines_only_dic) disciplines_only_reduced_dic = get_label_dic(disciplines_only_reduced) print(\"Klassen mit weniger als 100", "(<100) def remove_small_classes(labelset, label_dic): \"\"\"Löscht die Klassen, denen weniger als 100 Blogbeiträge zugeordnet", "jours_T\", \"XXIe siècle_T\"], \"Époque moderne_T\" : [\"Révolution française_T\", \"XVIe siècle_T\", \"XVIIe siècle_T\", \"XVIIIe", "save in lists def split_csv(self, documents): \"\"\"Splittet jede Zeile des eingelesenen Dokuments in", "labels, texts identset, disciplines_only_reduced, textset = delete_blog(identset, disciplines_only_reduced, textset) # In[30]: # splittet", "X_test_rl): labellist = \", \".join(labels) textlist = \" \".join(texts) test_rl.writerow([ident, labellist, textlist]) #", "Blogbeiträge)\"\"\" labelcount_dic = {} #tmp_label = \", \" for label in y: for", "loisirs_T\"]} themes_dic = {k.lower(): [i.lower() for i in v] for k, v in", "développement_D\", \"Transports_D\", \"Management et administration_D\"], \"Pluridisciplinarité_D\" : [\"Sciences sociales interdisciplinaires_D\"], \"Psychiatrie_D\" : [\"Psychiatrie_D\"],", "train_to_csv: train_to = csv.writer(train_to_csv, delimiter = \";\") train_to.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels,", "\";\") train_al.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_train_al, y_train_al, X_train_al): labellist", "[\"Histoire des religions_T\", \"Sociologie des religions_T\"], \"Représentations_T\" : [\"Architecture_T\", \"Études visuelles_T\", \"Histoire culturelle_T\",", "print(disciplines_only[1000]) # In[16]: # reduced_labels dic reduced_labels_dic = get_label_dic(reduced_labels) print(\"Auf höchste Hierarchieebene reduzierte", "= filnames X_train_rl, 
X_test_rl, y_train_rl, y_test_rl, z_train_rl, z_test_rl = train_test_split(textset, reduced_labels_reduced, identset, test_size", "# Class for accessing and preprocessing the data folder = '../Preprocessing' datasets =", "v.write(\"%s : %s\\n\" % (key, value)) # In[4]: print(identset[1000]) print(labelset[1000]) print(textset[1000]) print(len(identset)) print(len(labelset))", "= list(all_labels_dic.values()) bars = list(all_labels_dic.keys()) y_pos = np.arange(len(bars)) # Create horizontal bars plt.barh(y_pos,", "la presse_T\", \"Histoire et sociologie des médias_T\", \"Histoire et sociologie du livre_T\", \"Sciences", "[] for i, elements in enumerate(y): tmp_all_labels = [] tmp_themes = [] tmp_disciplines", "'@', '€', '&', '%', '&', '+', '*', '=', '#', '«', '»', '≥', '<',", "dic themes_only_dic = get_label_dic(themes_only) print(\"Auf höchste Hierarchieebene reduzierte Themen:\", len(themes_only_dic)) # schreibt die", "# In[27]: # disciplines_only dic reduced (<100) disciplines_only_reduced, small_classes_disciplines_only = remove_small_classes(disciplines_only, disciplines_only_dic) disciplines_only_reduced_dic", "tokens, vocab # split identnumber, nace-code-list and corporate purpose and save in lists", "enumerate(y): tmp_all_labels = [] tmp_themes = [] tmp_disciplines = [] #print(\"\\nlabels in y", "= \", \".join(labels) textlist = \" \".join(texts) test_al.writerow([ident, labellist, textlist]) # # Erstellung", "\";\") train_rl.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_train_rl, y_train_rl, X_train_rl): labellist", "pro Label in txt-Datei # (themes_only nicht reduziert) with open(folder+'/blogposts_per_theme.txt',\"w\", encoding=\"utf8\") as to:", "der Erzdiözese Salzburg (aes) wurden keine Disziplinen gewählt, # dementsprechend wird dieser Blog", "train_al.writerow([ident, labellist, textlist]) with open(datasets+'/all_labels_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_al_csv: test_al = 
csv.writer(test_al_csv,", "et orientale_T\", \"Mondes russes et soviétiques_T\", \"France_T\", \"Îles britanniques_T\", \"Italie_T\", \"Méditerranée_T\", \"Monde germanique_T\",", "as rl: for key, value in sorted(reduced_labels_dic.items()): rl.write(\"%s : %s\\n\" % (key, value))", "labellist = \", \".join(labels) textlist = \" \".join(texts) train_do.writerow([ident, labellist, textlist]) with open(datasets+'/disciplines_only_testset.csv',", "[\"Linguistique appliquée_D\", \"Théorie du langage et linguistique_D\", \"Langue et linguistique_D\"], \"Littérature_D\" : [\"Études", "tmp_label2.lower().replace('\"', '').strip().split(\",\") tmp_label2 = [x.strip()+'_t' for x in tmp_label2] tmp_label.extend(tmp_label2) #tmp_label = (tmp_label1", "= [] for i, elements in enumerate(y): tmp_all_labels = [] tmp_themes = []", ": [\"Commerce et affaires_D\", \"Économie_D\", \"Finance_D\"], \"Éducation_D\" : [\"Éducation et sciences de l'éducation_D\",", "all_labels_dic = get_label_dic(labelset) print(\"Klassen insgesammt:\", len(all_labels_dic)) #print(all_labels_dic) count = 0 classes = list(all_labels_dic)", "\"Géographie urbaine_T\", \"Migrations_T\", \"immigrations_T\", \"minorités_T\", \"Nature_T\", \"paysage et environnement_T\", \"Systèmes_T\", \"modélisation_T\", \"géostatistiques_T\"], \"Histoire_T\"", "print(len(labelset)) print(len(textset)) # In[5]: def get_label_dic(y): \"\"\"Erstellt ein dictionary zur Anzahl der Blogbeiträge", "value)) count += value print(\"Anzahl der vergebenen Labels:\", count) # In[8]: all_labels_reduced, small_classes_all_labels", "textliste=textliste.replace(',', '').replace(\"'\", \"\").replace('\"', '').replace(\"l'\", '').split(' ') blacklist = ['et', 'du', 'études', 'de', 'des',", "la consommation_T\", \"Sociologie de la culture_T\", \"Sociologie de la santé_T\", \"Sociologie du travail_T\",", "\"Psychologie expérimentale_D\", \"Psychologie pluridisciplinaire_D\", \"Psychanalyse_D\", \"Psychologie sociale_D\"], \"Sciences de la santé 
et de", "in zip(identset, labelset, textset): if ident.startswith('aes_'): continue else: idents.append(ident) labels.append(label) texts.append(text) return idents,", "# In[23]: # schreibt filename, classes, text von themes_only in csv-Datei with open(datasets+'/de_labeled_corpus_themes_only.csv',", "split data into train and test sets import matplotlib.pyplot as plt # module", "open(datasets+'/de_labeled_corpus_disciplines_only.csv', 'w', newline='', encoding=\"utf-8\") as do_csv: dow = csv.writer(do_csv, delimiter = \";\") dow.writerow([\"filename\",", "in vocabulary: vocabulary[key] += value else: vocabulary[key] = value return ident, label, text,", "if element in small_classes: continue else: tmp_labels.append(element) reduced_labelset.append(tmp_labels) return reduced_labelset, small_classes # #", "reduzierte Klassen insgesammt:\", len(reduced_labels_dic)) # schreibt die Anzahl der Blogbeiträge pro Label in", "print(\"THEMES:\") for key, value in themes_dic.items(): print(\"%s: %s\" % (key, value)) print(len(list(themes_dic))) #", "train_to.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_train_to, y_train_to, X_train_to): labellist =", "self.normalize_text(document.split(\";\", 3)[2]) #tmp_text = document.split(\";\", 3)[2] #print(\"tmp_text:\", tmp_text) else: tmp_label1 = document.split(\";\", 2)[1].strip()", "australe_T\", \"Afrique centrale_T\", \"Afrique de l'Est_T\", \"Afrique de l'Ouest_T\"], \"Amériques_T\" : [\"Amérique latine_T\",", "sorted(vocab.items()): v.write(\"%s : %s\\n\" % (key, value)) # In[4]: print(identset[1000]) print(labelset[1000]) print(textset[1000]) print(len(identset))", "\"classes\", \"text\"]) for ident, labels, texts in zip(z_train_to, y_train_to, X_train_to): labellist = \",", "du droit_T\", \"Sociologie du droit_T\"], \"Économie_T\" : [\"Développement économique_T\", \"Économie politique_T\", \"Gestion_T\", \"Travail_T\",", ": [\"Linguistique_T\", \"Littératures_T\"], \"Moyen Âge_T\" : [\"Bas 
Moyen Âge_T\", \"Haut Moyen Âge_T\"], \"Océanie_T\"", "print(\"%s: %s\" % (key, value)) count += value print(\"Anzahl der vergebenen Labels:\", count)", "if word in vocab: vocab[word] += 1 else: vocab[word] = 1 \"\"\" #", "\"anthropologie_T\" : [\"Anthropologie culturelle_T\", \"Anthropologie politique_T\", \"Anthropologie religieuse_T\", \"Anthropologie sociale_T\"], \"Asie_T\" : [\"Asie", "und Disziplinen auf die höchste Hierarchiestufe\"\"\" labels = [] # new y themes", "texts in zip(z_test_do, y_test_do, X_test_do): labellist = \", \".join(labels) textlist = \" \".join(texts)", "\"Japon_T\", \"Monde indien_T\", \"Monde persan_T\", \"Moyen-Orient_T\", \"Proche-Orient_T\"], \"Droit_T\" : [\"Histoire du droit_T\", \"Sociologie", "in zip(z_train_do, y_train_do, X_train_do): labellist = \", \".join(labels) textlist = \" \".join(texts) train_do.writerow([ident,", "# module for wordclouds # In[2]: # Class for accessing and preprocessing the", "\";\") test_rl.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_test_rl, y_test_rl, X_test_rl): labellist", "\"XVIe siècle_T\", \"XVIIe siècle_T\", \"XVIIIe siècle_T\"], \"Ethnologie_T\" : [\"Anthropologie culturelle_T\", \"Anthropologie politique_T\", \"Anthropologie", "jede Zeile des eingelesenen Dokuments in die drei Listen 1) filnames 2) labels", "list(all_labels_dic.keys()) y_pos = np.arange(len(bars)) # Create horizontal bars plt.barh(y_pos, height) # Create names", "[] reduced_labelset = [] for key, value in label_dic.items(): if value < 100:", ": [\"Épistémologie & histoire de la géographie_T\", \"Espace_T\", \"société et territoire_T\", \"Géographie :", "numpy as np import re # module for regular expression operations import csv", "\".join(labels) textlist = \" \".join(texts) test_al.writerow([ident, labellist, textlist]) # # Erstellung des Korpus", "des religions_T\", \"Sociologie des religions_T\"], \"Représentations_T\" : [\"Architecture_T\", \"Études visuelles_T\", \"Histoire 
culturelle_T\", \"Histoire", "= self.normalize_text(document.split(\";\", 4)[3]) #tmp_text = document.split(\";\", 4)[3].strip() #print(\"tmp_text:\", tmp_text) tmp_label1 = tmp_label1.lower().replace('\"', '').strip().split(\",\")", "zip(z_test_rl, y_test_rl, X_test_rl): labellist = \", \".join(labels) textlist = \" \".join(texts) test_rl.writerow([ident, labellist,", "in txt-Datei # (themes_only nicht reduziert) with open(folder+'/blogposts_per_theme.txt',\"w\", encoding=\"utf8\") as to: for key,", "Label in txt-Datei # (all_labels reduziert auf Labels mit mehr als 100 Blogbeiträgen)", "mit mehr als 100 Blogbeiträgen) with open(folder+'/blogposts_per_themes_only_reduced.txt',\"w\", encoding=\"utf8\") as tor: for key, value", "reduced_labels_dic = get_label_dic(reduced_labels) print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt:\", len(reduced_labels_dic)) # schreibt die", "all_labels in csv-Dateien with open(datasets+'/reduced_labels_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_rl_csv: train_rl = csv.writer(train_rl_csv,", "x = text, y = labels, z = filnames X_train_to, X_test_to, y_train_to, y_test_to,", "Erstellung des Korpus disciplines_only # In[26]: # disciplines_only dic disciplines_only_dic = get_label_dic(disciplines_only) print(\"Auf", "\"Vie de la recherche_T\"], \"Époque contemporaine_T\" : [\"Prospectives_T\", \"XIXe siècle_T\", \"XXe siècle_T\", \"1914-1918_T\",", "\"Sociologie politique_T\"], \"Études urbaines_T\" : [\"Études urbaines_T\"], \"Europe_T\" : [\"Balkans_T\", \"Belgique_T\", \"Europe centrale", "+= value else: vocabulary[key] = value return ident, label, text, vocabulary # In[3]:", "labels, themes, disciplines reduced_labels, themes_only, disciplines_only = reduce_labels(labelset) # In[15]: print(reduced_labels[1000]) print(themes_only[1000]) print(disciplines_only[1000])", "margin=0, background_color=\"white\").generate(text) # Display the generated image: plt.imshow(wordcloud, interpolation='bilinear') 
plt.axis(\"off\") plt.margins(x=0, y=0) plt.show()", "reduced_labels_reduced_dic = get_label_dic(reduced_labels_reduced) print(\"Klassen mit weniger als 100 Texten:\", len(small_classes_reduced_labels)) print(\"Auf höchste Hierarchieebene", "zip(z_train_rl, y_train_rl, X_train_rl): labellist = \", \".join(labels) textlist = \" \".join(texts) train_rl.writerow([ident, labellist,", "\";\") tow.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(identset, themes_only, textset): tow.writerow([ident,", "of a list (one document = identnumber, nace-code-list + text) documents = openfile.readlines()", "csv # module for csv output from sklearn.model_selection import train_test_split # module to", "des sciences_D\", \"Histoire des sciences sociales_D\"], \"Langue et linguistique_D\" : [\"Linguistique appliquée_D\", \"Théorie", "lists def split_csv(self, documents): \"\"\"Splittet jede Zeile des eingelesenen Dokuments in die drei", "siècle_T\", \"XXe siècle_T\", \"1914-1918_T\", \"1918-1939_T\", \"1939-1945_T\", \"1945-1989_T\", \"1989 à de nos jours_T\", \"XXIe", "Themen (reduziert):\", len(themes_only_reduced_dic)) # schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei", "\"Monde germanique_T\", \"Pays baltes et scandinaves_T\", \"Péninsule ibérique_T\", \"Suisse_T\"], \"Géographie_T\" : [\"Épistémologie &", "y = labels, z = filnames X_train_al, X_test_al, y_train_al, y_test_al, z_train_al, z_test_al =", "len(themes_only_reduced_dic)) # schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei # (themes_only", "bibliothéconomie_D\"], \"Droit_D\" : [\"Criminologie_D\", \"Droit_D\"], \"Économie_D\" : [\"Commerce et affaires_D\", \"Économie_D\", \"Finance_D\"], \"Éducation_D\"", "= 0 classes = list(all_labels_dic) # schreibt die Anzahl der Blogbeiträge pro Label", "themes_dic = {\"Afrique_T\" : [\"Afrique du nord_T\", \"Algérie_T\", \"Afrique noire_T\", \"Afrique australe_T\", \"Afrique", "item of a list (one document = identnumber, 
nace-code-list + text) documents =", "in themes_dic.items(): if element == key: tmp_all_labels.append(element) tmp_themes.append(element) #print(\"\\nTheme key:\", element) elif element", "sorted(all_labels_dic.items()): bpl.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) count", "In[ ]: pictures = '../Visualisierungen' if not os.path.exists(pictures): os.makedirs(pictures) # In[32]: # Histogramm:", "if ident.startswith('aes_'): continue else: idents.append(ident) labels.append(label) texts.append(text) return idents, labels, texts identset, disciplines_only_reduced,", "1)[0] #print(\"tmp_ident:\", tmp_ident) tmp_label = [] if re.match(\"aes_\", tmp_ident): # Blog \"aes -", "= 1 return labelcount_dic # In[6]: # löscht kleine Klassen (<100) def remove_small_classes(labelset,", "\", \".join(labels) textlist = \" \".join(texts) test_do.writerow([ident, labellist, textlist]) # # Visualisierungen #", "% (key, value)) # In[27]: # disciplines_only dic reduced (<100) disciplines_only_reduced, small_classes_disciplines_only =", "\"culture et représentation_T\", \"Géographie appliquée et aménagement_T\", \"Géographie rurale_T\", \"Géographie urbaine_T\", \"Migrations_T\", \"immigrations_T\",", "%s\" % (key, value)) # In[9]: # schreibt filename, classes, text von all_labels", "[] tmp_disciplines = [] #print(\"\\nlabels in y an der Stelle %s: %s\" %", "\", \".join(labels) textlist = \" \".join(texts) test_rl.writerow([ident, labellist, textlist]) # # Erstellung des", "'|', '_', '-', '–', '—', '­', '„', '“', '■', '•', '§', '$', '@',", "get corpus from disk identset, labelset, textset, vocab = MyCorpus(file) # save vocabulary", "drei Listen 1) filnames 2) labels 3) text \"\"\" ident = [] label", "with open(folder+'/blogposts_per_themes_only_reduced.txt',\"w\", encoding=\"utf8\") as tor: for key, value in sorted(themes_only_reduced_dic.items()): tor.write(\"%s : %s\\n\"", "\"bibliothéconomie_D\" : [\"Communication_D\", \"Sciences de l'information et 
bibliothéconomie_D\"], \"Droit_D\" : [\"Criminologie_D\", \"Droit_D\"], \"Économie_D\"", "element) elif element in value: tmp_all_labels.append(key) tmp_themes.append(key) #print(\"\\nTheme:\", key) else: (\"Element nicht gefunden:\",", "= delete_blog(identset, disciplines_only_reduced, textset) # In[30]: # splittet all_labels in train- und testset", "key: tmp_all_labels.append(element) tmp_disciplines.append(element) #print(\"\\nDiscipline key:\", element) elif element in value: tmp_all_labels.append(key) tmp_disciplines.append(key) #print(\"\\nDiscipline:\",", "testset von all_labels in csv-Dateien with open(datasets+'/reduced_labels_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_rl_csv: train_rl", "= train_test_split(textset, disciplines_only_reduced, identset, test_size = 0.20, stratify=disciplines_only_reduced, random_state=42) # In[31]: # speichert", "%s\\\" % (key, value)) small_classes.append(key) for elements in labelset: tmp_labels = [] for", "= csv.writer(test_rl_csv, delimiter = \";\") test_rl.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in", "[\"Asie centrale_T\", \"Asie du Sud-Est_T\", \"Extrême Orient_T\", \"Chine_T\", \"Japon_T\", \"Monde indien_T\", \"Monde persan_T\",", "small_classes.append(key) for elements in labelset: tmp_labels = [] for element in elements: if", "identset, test_size = 0.20, stratify=all_labels_reduced, random_state=42) # In[11]: # speichert train- und testset", "# read stopwords words = [] stopwords = open(folder+'/german_stopwords_plain.txt', 'r').read() for token in", "themes_dic = {k.lower(): [i.lower() for i in v] for k, v in themes_dic.items()}", "mehr als 100 Blogbeiträgen) with open(folder+'/blogposts_per_disciplines_only_reduced.txt',\"w\", encoding=\"utf8\") as dor: for key, value in", "words = [] stopwords = open(folder+'/german_stopwords_plain.txt', 'r').read() for token in tokens: #if token", "\"Époque moderne_T\" : [\"Révolution française_T\", \"XVIe siècle_T\", \"XVIIe 
siècle_T\", \"XVIIIe siècle_T\"], \"Ethnologie_T\" :", "test_rl_csv: test_rl = csv.writer(test_rl_csv, delimiter = \";\") test_rl.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels,", "graphic plt.show() # Save as SVG: plt.savefig(pictures+'/Blogs_all_labels_histogram.svg', format='svg') plt.savefig(pictures+'/Blogs_all_labels_histogram.png') # In[33]: # Visualisierung", "Anzahl der Blogbeiträge pro Label in txt-Datei # (all_labels reduziert auf Labels mit", "# Autorin: <NAME> # In[1]: # Imports import os import numpy as np", "# schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei # (all_labels nicht", "text, y = labels, z = filnames X_train_rl, X_test_rl, y_train_rl, y_test_rl, z_train_rl, z_test_rl", "reduce_labels(y): \"\"\"Reduziert die Themen und Disziplinen auf die höchste Hierarchiestufe\"\"\" labels = []", "des soins_D\", \"Biomédecine_D\", \"Toxicomanie_D\"], \"Sciences de l'information et de la communication_D\" : [\"Communication_D\",", "# save each document as one item of a list (one document =", "bpl.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) count +=", "(reduziert):\", len(reduced_labels_reduced_dic)) # schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei #", "la communication_D\" : [\"Communication_D\", \"Sciences de l'information et bibliothéconomie_D\"], \"Sciences politiques_D\" : [\"Relations", "encoding=\"utf-8\") as train_rl_csv: train_rl = csv.writer(train_rl_csv, delimiter = \";\") train_rl.writerow([\"filename\", \"classes\", \"text\"]) for", "# schreibt filename, classes, text von disciplines_only in csv-Datei with open(datasets+'/de_labeled_corpus_disciplines_only.csv', 'w', newline='',", "zip(z_train_do, y_train_do, X_train_do): labellist = \", \".join(labels) textlist = \" \".join(texts) train_do.writerow([ident, labellist,", "< 100: #print(\\\"%s : %s\\\" % (key, value)) small_classes.append(key) for elements in labelset:", "in enumerate(documents[1:]): tmp_ident = 
document.split(\";\", 1)[0] #print(\"tmp_ident:\", tmp_ident) tmp_label = [] if re.match(\"aes_\",", "= tmp_label3.lower().replace('\"', '').strip().split(\",\") tmp_label3 = [x.strip()+'_t' for x in tmp_label3] tmp_label.extend(tmp_label3) #print(\"Sonderfall:\", tmp_ident,", "for key, value in disciplines_dic.items(): if element == key: tmp_all_labels.append(element) tmp_disciplines.append(element) #print(\"\\nDiscipline key:\",", "in csv-Datei with open(datasets+'/de_labeled_corpus_all_labels.csv', 'w', newline='', encoding=\"utf-8\") as al_csv: alw = csv.writer(al_csv, delimiter", "et Amérique centrale_T\", \"Pays andins_T\", \"Canada_T\", \"États-Unis_T\"], \"anthropologie_T\" : [\"Anthropologie culturelle_T\", \"Anthropologie politique_T\",", "\"Sociologie urbaine_T\", \"Sport et loisirs_T\"]} themes_dic = {k.lower(): [i.lower() for i in v]", "\"Histoire et archéologie_D\" : [\"Archéologie_D\", \"Histoire_D\", \"Histoire et philosophie des sciences_D\", \"Histoire des", "tmp_label1) tmp_label2 = document.split(\";\", 3)[2].strip() #print(\"tmp_label2:\", tmp_label2) tmp_text, vocab = self.normalize_text(document.split(\";\", 4)[3]) #tmp_text", "sociale_T\"], \"Asie_T\" : [\"Asie centrale_T\", \"Asie du Sud-Est_T\", \"Extrême Orient_T\", \"Chine_T\", \"Japon_T\", \"Monde", "vocabulary[key] += value else: vocabulary[key] = value return ident, label, text, vocabulary #", "\"Histoire des sciences sociales_D\"], \"Langue et linguistique_D\" : [\"Linguistique appliquée_D\", \"Théorie du langage", "sociale_T\"], \"Études des sciences_T\" : [\"Histoire des sciences_T\", \"Philosophie des sciences_T\", \"Sociologie des", "Texte # # Autorin: <NAME> # In[1]: # Imports import os import numpy", "file = folder+'/de_labeled_corpus.csv' # Hypotheses-Blogposts: filename;classes;text if not os.path.exists(datasets): os.makedirs(datasets) class MyCorpus(object): \"\"\"", "not os.path.exists(pictures): os.makedirs(pictures) # In[32]: # Histogramm: Blogs pro all_labels (besser in 
excel", "recherche_T\"], \"Époque contemporaine_T\" : [\"Prospectives_T\", \"XIXe siècle_T\", \"XXe siècle_T\", \"1914-1918_T\", \"1918-1939_T\", \"1939-1945_T\", \"1945-1989_T\",", "du langage et linguistique_D\", \"Langue et linguistique_D\"], \"Littérature_D\" : [\"Études littéraires_D\", \"Théorie et", ": %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) # In[9]: #", "for ident, labels, texts in zip(identset, themes_only, textset): tow.writerow([ident, labels, texts]) # In[24]:", "split_csv(self, documents): \"\"\"Splittet jede Zeile des eingelesenen Dokuments in die drei Listen 1)", "\"Psychologie pluridisciplinaire_D\", \"Psychanalyse_D\", \"Psychologie sociale_D\"], \"Sciences de la santé et de la santé", "\"Administration publique_D\"], \"Arts et humanités_D\" : [\"Architecture_D\", \"Arts_D\", \"Études asiatiques_D\", \"Études anciennes_D\", \"Études", "dor.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) # In[28]:", "text.append(tmp_text) for key, value in vocab.items(): if key in vocabulary: vocabulary[key] += value", "in disciplines_dic.items()} print(\"DISCIPLINES:\") for key, value in disciplines_dic.items(): print(\"%s: %s\" % (key, value))", "et sociologie de la presse_T\", \"Histoire et sociologie des médias_T\", \"Histoire et sociologie", "value < 100: #print(\\\"%s : %s\\\" % (key, value)) small_classes.append(key) for elements in", "insgesammt (reduziert):\", len(reduced_labels_reduced_dic)) # schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei", "y): def __init__(self, file): self.file = file # memory friendlys because doesn't load", "print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt (reduziert):\", len(disciplines_only_reduced_dic)) # schreibt die Anzahl der", "csv.writer(train_al_csv, delimiter = \";\") train_al.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_train_al,", "\"Sociologie des sciences_T\"], \"Études du politique_T\" : [\"Guerres_T\", 
\"conflits_T\", \"violence_T\", \"Génocides et massacres_T\",", "reduced_labels dic reduced_labels_dic = get_label_dic(reduced_labels) print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt:\", len(reduced_labels_dic)) #", "len(all_labels_reduced_dic)) # schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei # (all_labels", "tmp_label2) tmp_text, vocab = self.normalize_text(document.split(\";\", 4)[3]) #tmp_text = document.split(\";\", 4)[3].strip() #print(\"tmp_text:\", tmp_text) tmp_label1", "von all_labels in csv-Dateien with open(datasets+'/reduced_labels_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_rl_csv: train_rl =", "linguistique_D\" : [\"Linguistique appliquée_D\", \"Théorie du langage et linguistique_D\", \"Langue et linguistique_D\"], \"Littérature_D\"", ": [\"Histoire des religions_T\", \"Sociologie des religions_T\"], \"Représentations_T\" : [\"Architecture_T\", \"Études visuelles_T\", \"Histoire", "[] texts = [] for ident, label, text in zip(identset, labelset, textset): if", "get_label_dic(y): \"\"\"Erstellt ein dictionary zur Anzahl der Blogbeiträge pro Label (Label : Anzahl", "%s: %s\" % (i, elements)) for element in elements: #print(\"\\nLabel:\", element) # themes", "textlist]) # In[10]: # splittet all_labels in train- und testset # x =", "travail_T\", \"Sociologie économique_T\", \"Sociologie urbaine_T\", \"Sport et loisirs_T\"]} themes_dic = {k.lower(): [i.lower() for", "\"Sciences politiques_D\", \"Administration publique_D\"], \"Sociologie et anthropologie_D\" : [\"Anthropologie_D\", \"Études régionales_D\", \"Sociologie_D\", \"Études", "texts in zip(identset, themes_only, textset): tow.writerow([ident, labels, texts]) # In[24]: # splittet all_labels", "continue else: idents.append(ident) labels.append(label) texts.append(text) return idents, labels, texts identset, disciplines_only_reduced, textset =", "# themes themes_dic = {\"Afrique_T\" : [\"Afrique du nord_T\", \"Algérie_T\", \"Afrique noire_T\", 
\"Afrique", "themes_only, disciplines_only = reduce_labels(labelset) # In[15]: print(reduced_labels[1000]) print(themes_only[1000]) print(disciplines_only[1000]) # In[16]: # reduced_labels", "bars) # Show graphic plt.show() # Save as SVG: plt.savefig(pictures+'/Blogs_all_labels_histogram.svg', format='svg') plt.savefig(pictures+'/Blogs_all_labels_histogram.png') #", "(key, value)) print(\"%s: %s\" % (key, value)) # In[17]: # reduced_labels reduced dic", "publiques_T\", \"Relations internationales_T\", \"Sciences politiques_T\", \"Sociologie politique_T\"], \"Études urbaines_T\" : [\"Études urbaines_T\"], \"Europe_T\"", "= train_test_split(textset, themes_only_reduced, identset, test_size = 0.20, stratify=themes_only_reduced, random_state=42) # In[25]: # speichert", "Anzahl der Blogbeiträge pro Label in txt-Datei # (disciplines_only reduziert auf Labels mit", "gewählt, # dementsprechend wird dieser Blog aus disciplines_only entfernt def delete_blog(identset, labelset, textset):", ": [\"Anthropologie culturelle_T\", \"Anthropologie politique_T\", \"Anthropologie religieuse_T\", \"Anthropologie sociale_T\"], \"Études des sciences_T\" :", "for regular expression operations import csv # module for csv output from sklearn.model_selection", "politiques_T\", \"Sociologie politique_T\"], \"Études urbaines_T\" : [\"Études urbaines_T\"], \"Europe_T\" : [\"Balkans_T\", \"Belgique_T\", \"Europe", "sorted(themes_only_dic.items()): to.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) #", "X_test_al, y_train_al, y_test_al, z_train_al, z_test_al = train_test_split(textset, all_labels_reduced, identset, test_size = 0.20, stratify=all_labels_reduced,", "labels = [] # new y themes = [] disciplines = [] for", "de la recherche_T\"], \"Époque contemporaine_T\" : [\"Prospectives_T\", \"XIXe siècle_T\", \"XXe siècle_T\", \"1914-1918_T\", \"1918-1939_T\",", "der Blogbeiträge pro Label in txt-Datei # (reduced_labels reduziert auf Labels mit mehr", "% (key, value)) # 
In[18]: # schreibt filename, classes, text von reduced_labels in", ": %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) count += value", "- <NAME>\" hat nur Thèmes: Histoire, Religions tmp_label3 = document.split(\";\", 2)[1].strip() tmp_label3 =", "der Blogbeiträge pro Label in txt-Datei # (all_labels nicht reduziert) with open(folder+'/blogposts_per_all_labels.txt',\"w\", encoding=\"utf8\")", "testset # x = text, y = labels, z = filnames X_train_rl, X_test_rl,", "'w', newline='', encoding=\"utf-8\") as al_csv: alw = csv.writer(al_csv, delimiter = \";\") alw.writerow([\"filename\", \"classes\",", "stratify=reduced_labels_reduced, random_state=42) # In[20]: # speichert train- und testset von all_labels in csv-Dateien", "label.append(tmp_label) text.append(tmp_text) for key, value in vocab.items(): if key in vocabulary: vocabulary[key] +=", "as to: for key, value in sorted(themes_only_dic.items()): to.write(\"%s : %s\\n\" % (key, value))", "value)) # In[22]: # themes_only dic reduced (<100) themes_only_reduced, small_classes_themes_only = remove_small_classes(themes_only, themes_only_dic)", "idents = [] labels = [] texts = [] for ident, label, text", "\" \".join(texts) train_rl.writerow([ident, labellist, textlist]) with open(datasets+'/reduced_labels_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_rl_csv: test_rl", "in vocab.items(): if key in vocabulary: vocabulary[key] += value else: vocabulary[key] = value", "Themen:\", len(themes_only_dic)) # schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei #", ": [\"Relations internationales_D\", \"Sciences politiques_D\", \"Administration publique_D\"], \"Arts et humanités_D\" : [\"Architecture_D\", \"Arts_D\",", "\"Péninsule ibérique_T\", \"Suisse_T\"], \"Géographie_T\" : [\"Épistémologie & histoire de la géographie_T\", \"Espace_T\", \"société", "politique sociale_D\" : [\"Études des relations interethniques_D\", \"Études sur la famille_D\", \"Questions sociales_D\",", "= [] vocabulary = {} # 
first row is headline for i, document", "texts in zip(z_test_to, y_test_to, X_test_to): labellist = \", \".join(labels) textlist = \" \".join(texts)", "= \";\") alw.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(identset, all_labels_reduced, textset):", "(all_labels nicht reduziert) with open(folder+'/blogposts_per_all_labels.txt',\"w\", encoding=\"utf8\") as bpl: for key, value in sorted(all_labels_dic.items()):", "text in zip(identset, labelset, textset): if ident.startswith('aes_'): continue else: idents.append(ident) labels.append(label) texts.append(text) return", "csv.writer(to_csv, delimiter = \";\") tow.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(identset,", "x in tmp_label3] tmp_label.extend(tmp_label3) #print(\"Sonderfall:\", tmp_ident, tmp_label) tmp_text, vocab = self.normalize_text(document.split(\";\", 3)[2]) #tmp_text", "insgesammt (reduziert):\", len(disciplines_only_reduced_dic)) # schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei", "test_al_csv: test_al = csv.writer(test_al_csv, delimiter = \";\") test_al.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels,", "\"classes\", \"text\"]) for ident, labels, texts in zip(z_train_al, y_train_al, X_train_al): labellist = \",", "test_do_csv: test_do = csv.writer(test_do_csv, delimiter = \";\") test_do.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels,", "\"Arts et humanités_D\" : [\"Architecture_D\", \"Arts_D\", \"Études asiatiques_D\", \"Études anciennes_D\", \"Études culturelles_D\", \"Folklore_D\",", ": disciplines scientifiques_D\", \"Éducation spécialisée_D\"], \"Études environnementales_D\" : [\"Études environnementales_D\", \"Géographie_D\", \"Études urbaines_D\"],", "ident, labels, texts in zip(z_test_to, y_test_to, X_test_to): labellist = \", \".join(labels) textlist =", "'').replace(\"'\", \"\").replace('\"', '').replace(\"l'\", '').split(' ') blacklist = ['et', 'du', 'études', 'de', 
'des', 'la',", "X_train_do, X_test_do, y_train_do, y_test_do, z_train_do, z_test_do = train_test_split(textset, disciplines_only_reduced, identset, test_size = 0.20,", "if value < 100: #print(\\\"%s : %s\\\" % (key, value)) small_classes.append(key) for elements", "Hierarchieebene reduzierte Klassen insgesammt:\", len(disciplines_only_dic)) # schreibt die Anzahl der Blogbeiträge pro Label", "\".join(texts) rlw.writerow([ident, labellist, textlist]) # In[19]: # splittet all_labels in train- und testset", "# module for visualization from wordcloud import WordCloud # module for wordclouds #", "pro Label in txt-Datei # (disciplines_only reduziert auf Labels mit mehr als 100", "\"Management et administration_D\" : [\"Ergonomie_D\", \"Travail et relations professionnelles_D\", \"Planification et développement_D\", \"Transports_D\",", "'■', '•', '§', '$', '@', '€', '&', '%', '&', '+', '*', '=', '#',", "#========================== # convert text to lower-case, remove punctuation and stopwords def normalize_text(self, text):", "du développement_D\", \"Psychologie éducative_D\", \"Psychologie expérimentale_D\", \"Psychologie pluridisciplinaire_D\", \"Psychanalyse_D\", \"Psychologie sociale_D\"], \"Sciences de", "Korpus disciplines_only # In[26]: # disciplines_only dic disciplines_only_dic = get_label_dic(disciplines_only) print(\"Auf höchste Hierarchieebene", "speichert train- und testset von all_labels in csv-Dateien with open(datasets+'/all_labels_trainset.csv', 'w', newline='', encoding=\"utf-8\")", "# Imports import os import numpy as np import re # module for", "\"Éducation : disciplines scientifiques_D\", \"Éducation spécialisée_D\"], \"Études environnementales_D\" : [\"Études environnementales_D\", \"Géographie_D\", \"Études", "# (reduced_labels reduziert auf Labels mit mehr als 100 Blogbeiträgen) with open(folder+'/blogposts_per_reduced_labels_reduced.txt',\"w\", encoding=\"utf8\")", "Themen und Disziplinen auf die höchste Hierarchiestufe\"\"\" labels = [] # new y", 
"value)) print(len(list(disciplines_dic))) # In[14]: # reduce labels to highest level def reduce_labels(y): \"\"\"Reduziert", "scientifiques_D\", \"Éducation spécialisée_D\"], \"Études environnementales_D\" : [\"Études environnementales_D\", \"Géographie_D\", \"Études urbaines_D\"], \"géographie et", "token in stopwords: continue else: words.append(token)\"\"\" #return words, vocab #return norm_text, vocab return", "test_to.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_test_to, y_test_to, X_test_to): labellist =", "return labelcount_dic # In[6]: # löscht kleine Klassen (<100) def remove_small_classes(labelset, label_dic): \"\"\"Löscht", "'de', 'des', 'la', 'dict_keys'] for element in textliste: if element in blacklist: textliste.remove(element)", "norm_text = text.lower() # remove punctuation for char in ['0', '1', '2', '3',", "txt-Datei # (all_labels nicht reduziert) with open(folder+'/blogposts_per_all_labels.txt',\"w\", encoding=\"utf8\") as bpl: for key, value", "# remove punctuation for char in ['0', '1', '2', '3', '4', '5', '6',", "sciences de l'éducation_D\", \"Éducation : disciplines scientifiques_D\", \"Éducation spécialisée_D\"], \"Études environnementales_D\" : [\"Études", "mehr als 100 Blogbeiträgen) with open(folder+'/blogposts_per_reduced_labels_reduced.txt',\"w\", encoding=\"utf8\") as rlr: for key, value in", "= \" \".join(texts) train_rl.writerow([ident, labellist, textlist]) with open(datasets+'/reduced_labels_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_rl_csv:", "\".join(labels) textlist = \" \".join(texts) train_to.writerow([ident, labellist, textlist]) with open(datasets+'/themes_only_testset.csv', 'w', newline='', encoding=\"utf-8\")", "In[14]: # reduce labels to highest level def reduce_labels(y): \"\"\"Reduziert die Themen und", "Hierarchieebene reduzierte Themen (reduziert):\", len(themes_only_reduced_dic)) # schreibt die Anzahl der Blogbeiträge pro Label", "entfernt def 
delete_blog(identset, labelset, textset): idents = [] labels = [] texts =", "train_rl_csv: train_rl = csv.writer(train_rl_csv, delimiter = \";\") train_rl.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels,", "count = 0 classes = list(all_labels_dic) # schreibt die Anzahl der Blogbeiträge pro", "100 Texten:\", len(small_classes_all_labels)) print(\"Klassen insgesammt (reduziert):\", len(all_labels_reduced_dic)) # schreibt die Anzahl der Blogbeiträge", "texts: #print(\"\\n text in iter:\", text) yield text # preprocessing #========================== # convert", "% (key, value)) small_classes.append(key) for elements in labelset: tmp_labels = [] for element", "[\"Histoire des femmes_T\", \"Histoire du travail_T\", \"Histoire économique_T\", \"Histoire industrielle_T\", \"Histoire rurale_T\", \"Histoire", "In[26]: # disciplines_only dic disciplines_only_dic = get_label_dic(disciplines_only) print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt:\",", "labelset, textset): idents = [] labels = [] texts = [] for ident,", "labellist = \", \".join(labels) textlist = \" \".join(texts) test_do.writerow([ident, labellist, textlist]) # #", "y_train_to, y_test_to, z_train_to, z_test_to = train_test_split(textset, themes_only_reduced, identset, test_size = 0.20, stratify=themes_only_reduced, random_state=42)", "von all_labels in csv-Datei with open(datasets+'/de_labeled_corpus_all_labels.csv', 'w', newline='', encoding=\"utf-8\") as al_csv: alw =", "test sets import matplotlib.pyplot as plt # module for visualization from wordcloud import", ": [\"Psychologie appliquée_D\", \"Psychologie biologique_D\", \"Psychologie clinique_D\", \"Psychologie du développement_D\", \"Psychologie éducative_D\", \"Psychologie", "for ident, labels, texts in zip(z_test_do, y_test_do, X_test_do): labellist = \", \".join(labels) textlist", "tmp_label1 = [x.strip()+'_d' for x in tmp_label1] tmp_label.extend(tmp_label1) tmp_label2 = tmp_label2.lower().replace('\"', 
'').strip().split(\",\") tmp_label2", "\"Philosophie_D\", \"Religions_D\"], \"bibliothéconomie_D\" : [\"Communication_D\", \"Sciences de l'information et bibliothéconomie_D\"], \"Droit_D\" : [\"Criminologie_D\",", "\".join(labels) textlist = \" \".join(texts) train_rl.writerow([ident, labellist, textlist]) with open(datasets+'/reduced_labels_testset.csv', 'w', newline='', encoding=\"utf-8\")", "def __iter__(self): openfile = open(self.file, 'r', encoding='utf-8') # save each document as one", "et linguistique_D\", \"Langue et linguistique_D\"], \"Littérature_D\" : [\"Études littéraires_D\", \"Théorie et critique littéraires_D\",", "y_test_to, X_test_to): labellist = \", \".join(labels) textlist = \" \".join(texts) test_to.writerow([ident, labellist, textlist])", "encoding=\"utf8\") as do: for key, value in sorted(disciplines_only_dic.items()): do.write(\"%s : %s\\n\" % (key,", "\"Études sur la famille_D\", \"Questions sociales_D\", \"Travail social_D\"]} disciplines_dic = {k.lower(): [i.lower() for", "\"Langue et linguistique_D\"], \"Littérature_D\" : [\"Études littéraires_D\", \"Théorie et critique littéraires_D\", \"Littérature britannique_D\",", "%s\" % (i, elements)) for element in elements: #print(\"\\nLabel:\", element) # themes for", "and corporate purpose and save in lists def split_csv(self, documents): \"\"\"Splittet jede Zeile", "\"Planification et développement_D\", \"Transports_D\", \"Management et administration_D\"], \"Pluridisciplinarité_D\" : [\"Sciences sociales interdisciplinaires_D\"], \"Psychiatrie_D\"", "l in label: if l in labelcount_dic: labelcount_dic[l] += 1 else: labelcount_dic[l] =", "to file with open(folder+'/blogs_vocabulary.txt',\"w\", encoding=\"utf8\") as v: for key, value in sorted(vocab.items()): v.write(\"%s", "ident, labels, texts in zip(identset, disciplines_only, textset): dow.writerow([ident, labels, texts]) # In[29]: #", "themes.append(list(set(tmp_themes))) disciplines.append(list(set(tmp_disciplines))) #print(\"\\nnew 
labelset:\", labels) return labels, themes, disciplines reduced_labels, themes_only, disciplines_only =", "new y themes = [] disciplines = [] for i, elements in enumerate(y):", "document.split(\";\", 2)[1].strip() #print(\"tmp_label1:\", tmp_label1) tmp_label2 = document.split(\";\", 3)[2].strip() #print(\"tmp_label2:\", tmp_label2) tmp_text, vocab =", "get_label_dic(themes_only_reduced) print(\"Klassen mit weniger als 100 Texten:\", len(small_classes_themes_only)) print(\"Auf höchste Hierarchieebene reduzierte Themen", "wordclouds # In[2]: # Class for accessing and preprocessing the data folder =", "as to_csv: tow = csv.writer(to_csv, delimiter = \";\") tow.writerow([\"filename\", \"classes\", \"text\"]) for ident,", "labels, texts in zip(identset, disciplines_only, textset): dow.writerow([ident, labels, texts]) # In[29]: # Für", "et méthodes_T\": [\"Approches biographiques_T\", \"Approches de corpus_T\", \"enquêtes_T\", \"archives_T\", \"Archéologie_T\", \"Cartographie_T\", \"imagerie_T\", \"SIG_T\",", "\"classes\", \"text\"]) for ident, labels, texts in zip(z_test_do, y_test_do, X_test_do): labellist = \",", "import WordCloud # module for wordclouds # In[2]: # Class for accessing and", ": [\"Afrique du nord_T\", \"Algérie_T\", \"Afrique noire_T\", \"Afrique australe_T\", \"Afrique centrale_T\", \"Afrique de", "den Text: - transformiert alles in Kleinschrift - löscht Satz- und Sonderzeichen\"\"\" norm_text", "der Blogbeiträge pro Label in txt-Datei # (themes_only reduziert auf Labels mit mehr", "sind\"\"\" small_classes = [] reduced_labelset = [] for key, value in label_dic.items(): if", "Show graphic plt.show() # Save as SVG: plt.savefig(pictures+'/Blogs_all_labels_histogram.svg', format='svg') plt.savefig(pictures+'/Blogs_all_labels_histogram.png') # In[33]: #", "= document.split(\";\", 2)[1].strip() #print(\"tmp_label1:\", tmp_label1) tmp_label2 = document.split(\";\", 3)[2].strip() #print(\"tmp_label2:\", tmp_label2) tmp_text, vocab", "reduced_labels reduced 
dic (<100) reduced_labels_reduced, small_classes_reduced_labels = remove_small_classes(reduced_labels, reduced_labels_dic) reduced_labels_reduced_dic = get_label_dic(reduced_labels_reduced) print(\"Klassen", "humanités_D\" : [\"Architecture_D\", \"Arts_D\", \"Études asiatiques_D\", \"Études anciennes_D\", \"Études culturelles_D\", \"Folklore_D\", \"Humanités pluridisciplinaires_D\",", "z_train_to, z_test_to = train_test_split(textset, themes_only_reduced, identset, test_size = 0.20, stratify=themes_only_reduced, random_state=42) # In[25]:", "remove_small_classes(disciplines_only, disciplines_only_dic) disciplines_only_reduced_dic = get_label_dic(disciplines_only_reduced) print(\"Klassen mit weniger als 100 Texten:\", len(small_classes_disciplines_only)) print(\"Auf", "territoire_T\", \"Géographie : politique_T\", \"culture et représentation_T\", \"Géographie appliquée et aménagement_T\", \"Géographie rurale_T\",", "= text, y = labels, z = filnames X_train_do, X_test_do, y_train_do, y_test_do, z_train_do,", "der Blogbeiträge pro Label in txt-Datei # (reduced_labels nicht reduziert) with open(folder+'/blogposts_per_reduced_labels.txt',\"w\", encoding=\"utf8\")", "small_classes_all_labels = remove_small_classes(labelset, all_labels_dic) all_labels_reduced_dic = get_label_dic(all_labels_reduced) print(\"Klassen mit weniger als 100 Texten:\",", "In[22]: # themes_only dic reduced (<100) themes_only_reduced, small_classes_themes_only = remove_small_classes(themes_only, themes_only_dic) themes_only_reduced_dic =", "#print(\"tmp_label2:\", tmp_label2) tmp_text, vocab = self.normalize_text(document.split(\";\", 4)[3]) #tmp_text = document.split(\";\", 4)[3].strip() #print(\"tmp_text:\", tmp_text)", "de traitement et de représentation_T\", \"Méthodes qualitatives_T\", \"Méthodes quantitatives_T\", \"Sciences auxiliaires de l'Histoire_T\",", "Blogbeiträge pro Label in txt-Datei # (themes_only nicht reduziert) with open(folder+'/blogposts_per_theme.txt',\"w\", 
encoding=\"utf8\") as", "\" \".join(texts) test_to.writerow([ident, labellist, textlist]) # # Erstellung des Korpus disciplines_only # In[26]:", "vocab = MyCorpus(file) # save vocabulary to file with open(folder+'/blogs_vocabulary.txt',\"w\", encoding=\"utf8\") as v:", "et linguistique_D\" : [\"Linguistique appliquée_D\", \"Théorie du langage et linguistique_D\", \"Langue et linguistique_D\"],", "# Create names on the y-axis plt.yticks(y_pos, bars) # Show graphic plt.show() #", "value in sorted(vocab.items()): v.write(\"%s : %s\\n\" % (key, value)) # In[4]: print(identset[1000]) print(labelset[1000])", "grecque_T\", \"Histoire romaine_T\", \"Monde oriental_T\", \"Préhistoire_T\"], \"Psychisme_T\" : [\"Psychanalyse_T\", \"Psychologie_T\"], \"Religions_T\" : [\"Histoire", "Disziplinen auf die höchste Hierarchiestufe\"\"\" labels = [] # new y themes =", "value)) # In[23]: # schreibt filename, classes, text von themes_only in csv-Datei with", "stopWords: if token in stopwords: continue else: words.append(token)\"\"\" #return words, vocab #return norm_text,", "np import re # module for regular expression operations import csv # module", "disciplines_only_reduced_dic = get_label_dic(disciplines_only_reduced) print(\"Klassen mit weniger als 100 Texten:\", len(small_classes_disciplines_only)) print(\"Auf höchste Hierarchieebene", "\"Historiographie_T\", \"Méthodes de traitement et de représentation_T\", \"Méthodes qualitatives_T\", \"Méthodes quantitatives_T\", \"Sciences auxiliaires", "as plt # module for visualization from wordcloud import WordCloud # module for", "de la culture_T\", \"Sociologie de la santé_T\", \"Sociologie du travail_T\", \"Sociologie économique_T\", \"Sociologie", "et bibliothéconomie_D\"], \"Droit_D\" : [\"Criminologie_D\", \"Droit_D\"], \"Économie_D\" : [\"Commerce et affaires_D\", \"Économie_D\", \"Finance_D\"],", "politiques_D\" : [\"Relations internationales_D\", \"Sciences politiques_D\", \"Administration publique_D\"], \"Sociologie et 
anthropologie_D\" : [\"Anthropologie_D\",", "classes, text von themes_only in csv-Datei with open(datasets+'/de_labeled_corpus_themes_only.csv', 'w', newline='', encoding=\"utf-8\") as to_csv:", "\"XXIe siècle_T\"], \"Époque moderne_T\" : [\"Révolution française_T\", \"XVIe siècle_T\", \"XVIIe siècle_T\", \"XVIIIe siècle_T\"],", "Anzahl der Blogbeiträge pro Label in txt-Datei # (themes_only reduziert auf Labels mit", "plt.yticks(y_pos, bars) # Show graphic plt.show() # Save as SVG: plt.savefig(pictures+'/Blogs_all_labels_histogram.svg', format='svg') plt.savefig(pictures+'/Blogs_all_labels_histogram.png')", "(key, value)) count += value print(\"Anzahl der vergebenen Labels:\", count) # In[8]: all_labels_reduced,", "l'éducation_T\", \"Sciences de l'éducation_T\"], \"Épistémologie et méthodes_T\": [\"Approches biographiques_T\", \"Approches de corpus_T\", \"enquêtes_T\",", "= \", \".join(labels) textlist = \" \".join(texts) train_do.writerow([ident, labellist, textlist]) with open(datasets+'/disciplines_only_testset.csv', 'w',", "in einer Wortwolke # Create a list of word textliste=str(all_labels_dic.keys()) textliste=textliste.replace(',', '').replace(\"'\", \"\").replace('\"',", "for elements in labelset: tmp_labels = [] for element in elements: if element", "# # Preprocessing der Texte # # Autorin: <NAME> # In[1]: # Imports", "100 Texten:\", len(small_classes_disciplines_only)) print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt (reduziert):\", len(disciplines_only_reduced_dic)) # schreibt", "document.split(\";\", 2)[1].strip() tmp_label3 = tmp_label3.lower().replace('\"', '').strip().split(\",\") tmp_label3 = [x.strip()+'_t' for x in tmp_label3]", "filnames X_train_rl, X_test_rl, y_train_rl, y_test_rl, z_train_rl, z_test_rl = train_test_split(textset, reduced_labels_reduced, identset, test_size =", "in value: tmp_all_labels.append(key) tmp_themes.append(key) #print(\"\\nTheme:\", key) else: (\"Element nicht gefunden:\", element) # discipilnes", 
"element) # themes for key, value in themes_dic.items(): if element == key: tmp_all_labels.append(element)", "schreibt filename, classes, text von themes_only in csv-Datei with open(datasets+'/de_labeled_corpus_themes_only.csv', 'w', newline='', encoding=\"utf-8\")", "disciplines_only dic reduced (<100) disciplines_only_reduced, small_classes_disciplines_only = remove_small_classes(disciplines_only, disciplines_only_dic) disciplines_only_reduced_dic = get_label_dic(disciplines_only_reduced) print(\"Klassen", "= [] #print(\"\\nlabels in y an der Stelle %s: %s\" % (i, elements))", "print(\"Auf höchste Hierarchieebene reduzierte Themen:\", len(themes_only_dic)) # schreibt die Anzahl der Blogbeiträge pro", "pro Label (Label : Anzahl der zugehörigen Blogbeiträge)\"\"\" labelcount_dic = {} #tmp_label =", "Amérique centrale_T\", \"Pays andins_T\", \"Canada_T\", \"États-Unis_T\"], \"anthropologie_T\" : [\"Anthropologie culturelle_T\", \"Anthropologie politique_T\", \"Anthropologie", "biologique_D\", \"Psychologie clinique_D\", \"Psychologie du développement_D\", \"Psychologie éducative_D\", \"Psychologie expérimentale_D\", \"Psychologie pluridisciplinaire_D\", \"Psychanalyse_D\",", "'\"', '„', '“', '”', \"´\", \"`\", \"’\", \"‘\", \"‚\",\"'\", '(', ')', '[', ']',", "Hierarchieebene reduzierte Klassen insgesammt:\", len(reduced_labels_dic)) # schreibt die Anzahl der Blogbeiträge pro Label", "tokens: if word in vocab: vocab[word] += 1 else: vocab[word] = 1 \"\"\"", "to highest level def reduce_labels(y): \"\"\"Reduziert die Themen und Disziplinen auf die höchste", "baltes et scandinaves_T\", \"Péninsule ibérique_T\", \"Suisse_T\"], \"Géographie_T\" : [\"Épistémologie & histoire de la", "1 return labelcount_dic # In[6]: # löscht kleine Klassen (<100) def remove_small_classes(labelset, label_dic):", "siècle_T\"], \"Ethnologie_T\" : [\"Anthropologie culturelle_T\", \"Anthropologie politique_T\", \"Anthropologie religieuse_T\", \"Anthropologie sociale_T\"], \"Études des", 
"Hierarchieebene reduzierte Klassen insgesammt (reduziert):\", len(disciplines_only_reduced_dic)) # schreibt die Anzahl der Blogbeiträge pro", "reduced_labels_reduced, small_classes_reduced_labels = remove_small_classes(reduced_labels, reduced_labels_dic) reduced_labels_reduced_dic = get_label_dic(reduced_labels_reduced) print(\"Klassen mit weniger als 100", "z_train_do, z_test_do = train_test_split(textset, disciplines_only_reduced, identset, test_size = 0.20, stratify=disciplines_only_reduced, random_state=42) # In[31]:", "split identnumber, nace-code-list and corporate purpose and save in lists def split_csv(self, documents):", "newline='', encoding=\"utf-8\") as do_csv: dow = csv.writer(do_csv, delimiter = \";\") dow.writerow([\"filename\", \"classes\", \"text\"])", "X_train_rl): labellist = \", \".join(labels) textlist = \" \".join(texts) train_rl.writerow([ident, labellist, textlist]) with", "%s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) # In[18]: # schreibt", "reduce_labels(labelset) # In[15]: print(reduced_labels[1000]) print(themes_only[1000]) print(disciplines_only[1000]) # In[16]: # reduced_labels dic reduced_labels_dic =", "[\"Psychiatrie_D\"], \"Psychologie_D\" : [\"Psychologie appliquée_D\", \"Psychologie biologique_D\", \"Psychologie clinique_D\", \"Psychologie du développement_D\", \"Psychologie", "alw = csv.writer(al_csv, delimiter = \";\") alw.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts", "= \" \".join(texts) test_rl.writerow([ident, labellist, textlist]) # # Erstellung des Korpus themes_only #", "# In[4]: print(identset[1000]) print(labelset[1000]) print(textset[1000]) print(len(identset)) print(len(labelset)) print(len(textset)) # In[5]: def get_label_dic(y): \"\"\"Erstellt", "delete_blog(identset, disciplines_only_reduced, textset) # In[30]: # splittet all_labels in train- und testset #", "\"Monde persan_T\", \"Moyen-Orient_T\", \"Proche-Orient_T\"], \"Droit_T\" : [\"Histoire du droit_T\", \"Sociologie du 
droit_T\"], \"Économie_T\"", "#print(all_labels_dic) count = 0 classes = list(all_labels_dic) # schreibt die Anzahl der Blogbeiträge", "[] disciplines = [] for i, elements in enumerate(y): tmp_all_labels = [] tmp_themes", "z_train_al, z_test_al = train_test_split(textset, all_labels_reduced, identset, test_size = 0.20, stratify=all_labels_reduced, random_state=42) # In[11]:", "labellist = \", \".join(labels) textlist = \" \".join(texts) test_rl.writerow([ident, labellist, textlist]) # #", "'9', '.', ',', ';', ':', '!', '?', '…','·', '·', '\"', '„', '“', '”',", "labelcount_dic = {} #tmp_label = \", \" for label in y: for l", "\"Pluridisciplinarité_D\" : [\"Sciences sociales interdisciplinaires_D\"], \"Psychiatrie_D\" : [\"Psychiatrie_D\"], \"Psychologie_D\" : [\"Psychologie appliquée_D\", \"Psychologie", "background_color=\"white\").generate(text) # Display the generated image: plt.imshow(wordcloud, interpolation='bilinear') plt.axis(\"off\") plt.margins(x=0, y=0) plt.show() #", "# (disciplines_only reduziert auf Labels mit mehr als 100 Blogbeiträgen) with open(folder+'/blogposts_per_disciplines_only_reduced.txt',\"w\", encoding=\"utf8\")", "% (key, value)) print(\"%s: %s\" % (key, value)) # In[23]: # schreibt filename,", ": %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) # In[23]: #", "rlr.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) # In[18]:", "headline for i, document in enumerate(documents[1:]): tmp_ident = document.split(\";\", 1)[0] #print(\"tmp_ident:\", tmp_ident) tmp_label", "# In[25]: # speichert train-, validation- und testset von themes_only in csv-Dateien with", "Save as SVG: plt.savefig(pictures+'/Blogs_all_labels_histogram.svg', format='svg') plt.savefig(pictures+'/Blogs_all_labels_histogram.png') # In[33]: # Visualisierung des all_label_dics in", "classes, text von reduced_labels in csv-Datei with open(datasets+'/de_labeled_corpus_reduced_labels.csv', 'w', newline='', encoding=\"utf-8\") as rl_csv:", "# module for 
regular expression operations import csv # module for csv output", "to_csv: tow = csv.writer(to_csv, delimiter = \";\") tow.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels,", "höchste Hierarchieebene reduzierte Themen:\", len(themes_only_dic)) # schreibt die Anzahl der Blogbeiträge pro Label", "als 100 Texten:\", len(small_classes_all_labels)) print(\"Klassen insgesammt (reduziert):\", len(all_labels_reduced_dic)) # schreibt die Anzahl der", "all_labels_reduced, identset, test_size = 0.20, stratify=all_labels_reduced, random_state=42) # In[11]: # speichert train- und", "open(folder+'/blogposts_per_reduced_labels_reduced.txt',\"w\", encoding=\"utf8\") as rlr: for key, value in sorted(reduced_labels_reduced_dic.items()): rlr.write(\"%s : %s\\n\" %", "delimiter = \";\") rlw.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(identset, reduced_labels_reduced,", "[\"Histoire de l'éducation_T\", \"Sciences de l'éducation_T\"], \"Épistémologie et méthodes_T\": [\"Approches biographiques_T\", \"Approches de", "\"text\"]) for ident, labels, texts in zip(z_test_rl, y_test_rl, X_test_rl): labellist = \", \".join(labels)", "droit_T\"], \"Économie_T\" : [\"Développement économique_T\", \"Économie politique_T\", \"Gestion_T\", \"Travail_T\", \"emploi_T\"], \"Éducation_T\" : [\"Histoire", "\"Toxicomanie_D\"], \"Sciences de l'information et de la communication_D\" : [\"Communication_D\", \"Sciences de l'information", "der Blogbeiträge pro Label in txt-Datei # (disciplines_only nicht reduziert) with open(folder+'/blogposts_per_discipline.txt',\"w\", encoding=\"utf8\")", "sorted(reduced_labels_dic.items()): rl.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) #", "#print(\"\\n text in iter:\", text) yield text # preprocessing #========================== # convert text", "# In[26]: # disciplines_only dic disciplines_only_dic = get_label_dic(disciplines_only) print(\"Auf höchste Hierarchieebene reduzierte Klassen", 
"culturelles_D\", \"Folklore_D\", \"Humanités pluridisciplinaires_D\", \"Musique_D\", \"Philosophie_D\", \"Religions_D\"], \"bibliothéconomie_D\" : [\"Communication_D\", \"Sciences de l'information", "[\"Anthropologie_D\", \"Études régionales_D\", \"Sociologie_D\", \"Études féministes_D\"], \"Travail social et politique sociale_D\" : [\"Études", "bars plt.barh(y_pos, height) # Create names on the y-axis plt.yticks(y_pos, bars) # Show", "vocabulary = {} # first row is headline for i, document in enumerate(documents[1:]):", "sociale_D\" : [\"Études des relations interethniques_D\", \"Études sur la famille_D\", \"Questions sociales_D\", \"Travail", "weniger als 100 Texten:\", len(small_classes_reduced_labels)) print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt (reduziert):\", len(reduced_labels_reduced_dic))", "document.split(\";\", 3)[2] #print(\"tmp_text:\", tmp_text) else: tmp_label1 = document.split(\";\", 2)[1].strip() #print(\"tmp_label1:\", tmp_label1) tmp_label2 =", "tmp_text) else: tmp_label1 = document.split(\";\", 2)[1].strip() #print(\"tmp_label1:\", tmp_label1) tmp_label2 = document.split(\";\", 3)[2].strip() #print(\"tmp_label2:\",", "santé publique_D\" : [\"Éthique_D\", \"Politique et services de santé_D\", \"Sciences et pratiques des", "\"archives_T\", \"Archéologie_T\", \"Cartographie_T\", \"imagerie_T\", \"SIG_T\", \"Digital humanities_T\", \"Épistémologie_T\", \"Historiographie_T\", \"Méthodes de traitement et", "level def reduce_labels(y): \"\"\"Reduziert die Themen und Disziplinen auf die höchste Hierarchiestufe\"\"\" labels", "Blogbeiträge pro Label in txt-Datei # (disciplines_only reduziert auf Labels mit mehr als", "z = filnames X_train_al, X_test_al, y_train_al, y_test_al, z_train_al, z_test_al = train_test_split(textset, all_labels_reduced, identset,", "\"Méthodes quantitatives_T\", \"Sciences auxiliaires de l'Histoire_T\", \"Vie de la recherche_T\"], \"Époque contemporaine_T\" :", "text) documents = openfile.readlines() 
openfile.close() texts = self.split_csv(documents) for text in texts: #print(\"\\n", "= csv.writer(test_to_csv, delimiter = \";\") test_to.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in", "santé et de la santé publique_D\" : [\"Éthique_D\", \"Politique et services de santé_D\",", "[\"Histoire du droit_T\", \"Sociologie du droit_T\"], \"Économie_T\" : [\"Développement économique_T\", \"Économie politique_T\", \"Gestion_T\",", "disciplines scientifiques_D\", \"Éducation spécialisée_D\"], \"Études environnementales_D\" : [\"Études environnementales_D\", \"Géographie_D\", \"Études urbaines_D\"], \"géographie", "+ tmp_label2) #print(tmp_label) tmp_label = [x.strip() for x in tmp_label] ident.append(tmp_ident) label.append(tmp_label) text.append(tmp_text)", "labellist, textlist]) # # Erstellung des Korpus reduced_labels # In[12]: # themes themes_dic", "et anthropologie_D\" : [\"Anthropologie_D\", \"Études régionales_D\", \"Sociologie_D\", \"Études féministes_D\"], \"Travail social et politique", "# # Visualisierungen # In[ ]: pictures = '../Visualisierungen' if not os.path.exists(pictures): os.makedirs(pictures)", "= [] for key, value in label_dic.items(): if value < 100: #print(\\\"%s :", "Blogbeiträgen) with open(folder+'/blogposts_per_reduced_labels_reduced.txt',\"w\", encoding=\"utf8\") as rlr: for key, value in sorted(reduced_labels_reduced_dic.items()): rlr.write(\"%s :", "urbaines_D\"], \"géographie et développement_D\" : [\"Études environnementales_D\", \"Géographie_D\", \"Études urbaines_D\"], \"Histoire et archéologie_D\"", "in v] for k, v in themes_dic.items()} print(\"THEMES:\") for key, value in themes_dic.items():", "open(folder+'/blogposts_per_all_labels.txt',\"w\", encoding=\"utf8\") as bpl: for key, value in sorted(all_labels_dic.items()): bpl.write(\"%s : %s\\n\" %", "all_labels_reduced_dic = get_label_dic(all_labels_reduced) print(\"Klassen mit weniger als 100 Texten:\", len(small_classes_all_labels)) print(\"Klassen 
insgesammt (reduziert):\",", "100 Texten:\", len(small_classes_themes_only)) print(\"Auf höchste Hierarchieebene reduzierte Themen (reduziert):\", len(themes_only_reduced_dic)) # schreibt die", "bplr.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) # In[9]:", "et archéologie_D\" : [\"Archéologie_D\", \"Histoire_D\", \"Histoire et philosophie des sciences_D\", \"Histoire des sciences", "In[19]: # splittet all_labels in train- und testset # x = text, y", "zip(z_train_al, y_train_al, X_train_al): labellist = \", \".join(labels) textlist = \" \".join(texts) train_al.writerow([ident, labellist,", "newline='', encoding=\"utf-8\") as al_csv: alw = csv.writer(al_csv, delimiter = \";\") alw.writerow([\"filename\", \"classes\", \"text\"])", "value)) print(\"%s: %s\" % (key, value)) # In[23]: # schreibt filename, classes, text", "'“', '■', '•', '§', '$', '@', '€', '&', '%', '&', '+', '*', '=',", "Erstellung des Korpus themes_only # In[21]: # themes_only dic themes_only_dic = get_label_dic(themes_only) print(\"Auf", "'·', '\"', '„', '“', '”', \"´\", \"`\", \"’\", \"‘\", \"‚\",\"'\", '(', ')', '[',", "module for wordclouds # In[2]: # Class for accessing and preprocessing the data", "ident.startswith('aes_'): continue else: idents.append(ident) labels.append(label) texts.append(text) return idents, labels, texts identset, disciplines_only_reduced, textset", "(key, value)) print(\"%s: %s\" % (key, value)) count += value print(\"Anzahl der vergebenen", "[\"Approches biographiques_T\", \"Approches de corpus_T\", \"enquêtes_T\", \"archives_T\", \"Archéologie_T\", \"Cartographie_T\", \"imagerie_T\", \"SIG_T\", \"Digital humanities_T\",", "\"Mouvements politiques et sociaux_T\", \"Politiques et actions publiques_T\", \"Relations internationales_T\", \"Sciences politiques_T\", \"Sociologie", "ident = [] label = [] text = [] vocabulary = {} #", "save each document as one item of a list (one document = identnumber,", 
"open(folder+'/blogposts_per_discipline.txt',\"w\", encoding=\"utf8\") as do: for key, value in sorted(disciplines_only_dic.items()): do.write(\"%s : %s\\n\" %", "# x = text, y = labels, z = filnames X_train_al, X_test_al, y_train_al,", ": %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) # In[17]: #", "[\"Linguistique_T\", \"Littératures_T\"], \"Moyen Âge_T\" : [\"Bas Moyen Âge_T\", \"Haut Moyen Âge_T\"], \"Océanie_T\" :", "# # Autorin: <NAME> # In[1]: # Imports import os import numpy as", "value)) # In[27]: # disciplines_only dic reduced (<100) disciplines_only_reduced, small_classes_disciplines_only = remove_small_classes(disciplines_only, disciplines_only_dic)", "filnames X_train_to, X_test_to, y_train_to, y_test_to, z_train_to, z_test_to = train_test_split(textset, themes_only_reduced, identset, test_size =", "in y: for l in label: if l in labelcount_dic: labelcount_dic[l] += 1", "test_size = 0.20, stratify=all_labels_reduced, random_state=42) # In[11]: # speichert train- und testset von", "vocab = self.normalize_text(document.split(\";\", 3)[2]) #tmp_text = document.split(\";\", 3)[2] #print(\"tmp_text:\", tmp_text) else: tmp_label1 =", "#print(\"tmp_text:\", tmp_text) else: tmp_label1 = document.split(\";\", 2)[1].strip() #print(\"tmp_label1:\", tmp_label1) tmp_label2 = document.split(\";\", 3)[2].strip()", "import matplotlib.pyplot as plt # module for visualization from wordcloud import WordCloud #", "delimiter = \";\") train_al.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_train_al, y_train_al,", "as train_rl_csv: train_rl = csv.writer(train_rl_csv, delimiter = \";\") train_rl.writerow([\"filename\", \"classes\", \"text\"]) for ident,", "\"Mexique et Amérique centrale_T\", \"Pays andins_T\", \"Canada_T\", \"États-Unis_T\"], \"anthropologie_T\" : [\"Anthropologie culturelle_T\", \"Anthropologie", "'^']: norm_text = norm_text.replace(char, ' ') tokens = norm_text.split() vocab = {} for", 
"open(datasets+'/all_labels_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_al_csv: test_al = csv.writer(test_al_csv, delimiter = \";\") test_al.writerow([\"filename\",", "\"Anthropologie sociale_T\"], \"Études des sciences_T\" : [\"Histoire des sciences_T\", \"Philosophie des sciences_T\", \"Sociologie", "tmp_ident) tmp_label = [] if re.match(\"aes_\", tmp_ident): # Blog \"aes - <NAME>\" hat", "disciplines_dic = {\"administration publique et développement_D\" : [\"Relations internationales_D\", \"Sciences politiques_D\", \"Administration publique_D\"],", "= \";\") train_al.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_train_al, y_train_al, X_train_al):", "delimiter = \";\") test_to.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_test_to, y_test_to,", "labels, z = filnames X_train_rl, X_test_rl, y_train_rl, y_test_rl, z_train_rl, z_test_rl = train_test_split(textset, reduced_labels_reduced,", "= openfile.readlines() openfile.close() texts = self.split_csv(documents) for text in texts: #print(\"\\n text in", "dic reduced_labels_dic = get_label_dic(reduced_labels) print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt:\", len(reduced_labels_dic)) # schreibt", "\"text\"]) for ident, labels, texts in zip(z_test_do, y_test_do, X_test_do): labellist = \", \".join(labels)", "\".join(texts) test_do.writerow([ident, labellist, textlist]) # # Visualisierungen # In[ ]: pictures = '../Visualisierungen'", "In[21]: # themes_only dic themes_only_dic = get_label_dic(themes_only) print(\"Auf höchste Hierarchieebene reduzierte Themen:\", len(themes_only_dic))", "\"Moyen Âge_T\" : [\"Bas Moyen Âge_T\", \"Haut Moyen Âge_T\"], \"Océanie_T\" : [\"Océanie_T\"], \"Pensée_T\"", "reduzierte Themen (reduziert):\", len(themes_only_reduced_dic)) # schreibt die Anzahl der Blogbeiträge pro Label in", "et sociologie du livre_T\", \"Sciences de l'information_T\"], \"Langage_T\" : [\"Linguistique_T\", 
\"Littératures_T\"], \"Moyen Âge_T\"", "\"emploi_T\"], \"Éducation_T\" : [\"Histoire de l'éducation_T\", \"Sciences de l'éducation_T\"], \"Épistémologie et méthodes_T\": [\"Approches", "X_train_to): labellist = \", \".join(labels) textlist = \" \".join(texts) train_to.writerow([ident, labellist, textlist]) with", "[\"Édition électronique_T\", \"Histoire et sociologie de la presse_T\", \"Histoire et sociologie des médias_T\",", "documents): \"\"\"Splittet jede Zeile des eingelesenen Dokuments in die drei Listen 1) filnames", "\" \".join(texts) rlw.writerow([ident, labellist, textlist]) # In[19]: # splittet all_labels in train- und", "newline='', encoding=\"utf-8\") as train_al_csv: train_al = csv.writer(train_al_csv, delimiter = \";\") train_al.writerow([\"filename\", \"classes\", \"text\"])", "sociologie de la presse_T\", \"Histoire et sociologie des médias_T\", \"Histoire et sociologie du", "tmp_label1] tmp_label.extend(tmp_label1) tmp_label2 = tmp_label2.lower().replace('\"', '').strip().split(\",\") tmp_label2 = [x.strip()+'_t' for x in tmp_label2]", "In[25]: # speichert train-, validation- und testset von themes_only in csv-Dateien with open(datasets+'/themes_only_trainset.csv',", "[\"Éthique_D\", \"Politique et services de santé_D\", \"Sciences et pratiques des soins_D\", \"Biomédecine_D\", \"Toxicomanie_D\"],", "% (key, value)) print(len(list(themes_dic))) # In[13]: disciplines_dic = {\"administration publique et développement_D\" :", "key, value in disciplines_dic.items(): print(\"%s: %s\" % (key, value)) print(len(list(disciplines_dic))) # In[14]: #", "y = labels, z = filnames X_train_do, X_test_do, y_train_do, y_test_do, z_train_do, z_test_do =", "to split data into train and test sets import matplotlib.pyplot as plt #", "\"Théorie du langage et linguistique_D\", \"Langue et linguistique_D\"], \"Littérature_D\" : [\"Études littéraires_D\", \"Théorie", "von themes_only in csv-Datei with open(datasets+'/de_labeled_corpus_themes_only.csv', 'w', newline='', 
encoding=\"utf-8\") as to_csv: tow =", "# In[33]: # Visualisierung des all_label_dics in einer Wortwolke # Create a list", "vergebenen Labels:\", count) # In[8]: all_labels_reduced, small_classes_all_labels = remove_small_classes(labelset, all_labels_dic) all_labels_reduced_dic = get_label_dic(all_labels_reduced)", "'7', '8', '9', '.', ',', ';', ':', '!', '?', '…','·', '·', '\"', '„',", "value in sorted(all_labels_reduced_dic.items()): bplr.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key,", "de la géographie_T\", \"Espace_T\", \"société et territoire_T\", \"Géographie : politique_T\", \"culture et représentation_T\",", "\", \" for label in y: for l in label: if l in", "= csv.writer(to_csv, delimiter = \";\") tow.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in", "as bplr: for key, value in sorted(all_labels_reduced_dic.items()): bplr.write(\"%s : %s\\n\" % (key, value))", "Hierarchieebene reduzierte Klassen insgesammt (reduziert):\", len(reduced_labels_reduced_dic)) # schreibt die Anzahl der Blogbeiträge pro", "count += value print(\"Anzahl der vergebenen Labels:\", count) # In[8]: all_labels_reduced, small_classes_all_labels =", "zip(z_test_al, y_test_al, X_test_al): labellist = \", \".join(labels) textlist = \" \".join(texts) test_al.writerow([ident, labellist,", "\"Digital humanities_T\", \"Épistémologie_T\", \"Historiographie_T\", \"Méthodes de traitement et de représentation_T\", \"Méthodes qualitatives_T\", \"Méthodes", "testset von disciplines_only in csv-Dateien with open(datasets+'/disciplines_only_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_do_csv: train_do", "y_test_do, z_train_do, z_test_do = train_test_split(textset, disciplines_only_reduced, identset, test_size = 0.20, stratify=disciplines_only_reduced, random_state=42) #", "label, text in zip(identset, labelset, textset): if ident.startswith('aes_'): continue else: idents.append(ident) labels.append(label) texts.append(text)", "= \";\") 
train_rl.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_train_rl, y_train_rl, X_train_rl):", "(i, elements)) for element in elements: #print(\"\\nLabel:\", element) # themes for key, value", "%s\" % (key, value)) count += value print(\"Anzahl der vergebenen Labels:\", count) #", "data into train and test sets import matplotlib.pyplot as plt # module for", "spécialisée_D\"], \"Études environnementales_D\" : [\"Études environnementales_D\", \"Géographie_D\", \"Études urbaines_D\"], \"géographie et développement_D\" :", "Erstellung des Korpus reduced_labels # In[12]: # themes themes_dic = {\"Afrique_T\" : [\"Afrique", "len(small_classes_themes_only)) print(\"Auf höchste Hierarchieebene reduzierte Themen (reduziert):\", len(themes_only_reduced_dic)) # schreibt die Anzahl der", "politique_T\"], \"Études urbaines_T\" : [\"Études urbaines_T\"], \"Europe_T\" : [\"Balkans_T\", \"Belgique_T\", \"Europe centrale et", "re # module for regular expression operations import csv # module for csv", "height) # Create names on the y-axis plt.yticks(y_pos, bars) # Show graphic plt.show()", "zip(z_test_do, y_test_do, X_test_do): labellist = \", \".join(labels) textlist = \" \".join(texts) test_do.writerow([ident, labellist,", "développement_D\", \"Psychologie éducative_D\", \"Psychologie expérimentale_D\", \"Psychologie pluridisciplinaire_D\", \"Psychanalyse_D\", \"Psychologie sociale_D\"], \"Sciences de la", "with open(folder+'/blogposts_per_all_labels.txt',\"w\", encoding=\"utf8\") as bpl: for key, value in sorted(all_labels_dic.items()): bpl.write(\"%s : %s\\n\"", "[x.strip()+'_t' for x in tmp_label3] tmp_label.extend(tmp_label3) #print(\"Sonderfall:\", tmp_ident, tmp_label) tmp_text, vocab = self.normalize_text(document.split(\";\",", "\"classes\", \"text\"]) for ident, labels, texts in zip(identset, themes_only, textset): tow.writerow([ident, labels, texts])", "\"’\", \"‘\", \"‚\",\"'\", '(', ')', '[', ']', '{', '}', '/', '\\\\', '|', '_',", 
"data # def __init__(self, file, x, y): def __init__(self, file): self.file = file", "[\"Communication_D\", \"Sciences de l'information et bibliothéconomie_D\"], \"Droit_D\" : [\"Criminologie_D\", \"Droit_D\"], \"Économie_D\" : [\"Commerce", "for key, value in label_dic.items(): if value < 100: #print(\\\"%s : %s\\\" %", "\"Études asiatiques_D\", \"Études anciennes_D\", \"Études culturelles_D\", \"Folklore_D\", \"Humanités pluridisciplinaires_D\", \"Musique_D\", \"Philosophie_D\", \"Religions_D\"], \"bibliothéconomie_D\"", "cognitives_T\"], \"Préhistoire et antiquité_T\" : [\"Égypte ancienne_T\", \"Histoire grecque_T\", \"Histoire romaine_T\", \"Monde oriental_T\",", "\".join(texts) test_to.writerow([ident, labellist, textlist]) # # Erstellung des Korpus disciplines_only # In[26]: #", "with open(datasets+'/all_labels_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_al_csv: test_al = csv.writer(test_al_csv, delimiter = \";\")", "% (key, value)) print(\"%s: %s\" % (key, value)) count += value print(\"Anzahl der", "'\\\\', '|', '_', '-', '–', '—', '­', '„', '“', '■', '•', '§', '$',", "de la consommation_T\", \"Sociologie de la culture_T\", \"Sociologie de la santé_T\", \"Sociologie du", "tow.writerow([ident, labels, texts]) # In[24]: # splittet all_labels in train- und testset #", "des all_label_dics in einer Wortwolke # Create a list of word textliste=str(all_labels_dic.keys()) textliste=textliste.replace(',',", "labelcount_dic[l] += 1 else: labelcount_dic[l] = 1 return labelcount_dic # In[6]: # löscht", "\"Psychologie sociale_D\"], \"Sciences de la santé et de la santé publique_D\" : [\"Éthique_D\",", ": [\"Études environnementales_D\", \"Géographie_D\", \"Études urbaines_D\"], \"géographie et développement_D\" : [\"Études environnementales_D\", \"Géographie_D\",", "ident, labels, texts in zip(z_train_al, y_train_al, X_train_al): labellist = \", \".join(labels) textlist =", "random_state=42) # In[20]: # speichert train- und testset von all_labels in 
csv-Dateien with", "\"classes\", \"text\"]) for ident, labels, texts in zip(z_test_al, y_test_al, X_test_al): labellist = \",", "if re.match(\"aes_\", tmp_ident): # Blog \"aes - <NAME>\" hat nur Thèmes: Histoire, Religions", "sets import matplotlib.pyplot as plt # module for visualization from wordcloud import WordCloud", "small_classes_disciplines_only = remove_small_classes(disciplines_only, disciplines_only_dic) disciplines_only_reduced_dic = get_label_dic(disciplines_only_reduced) print(\"Klassen mit weniger als 100 Texten:\",", "for element in elements: #print(\"\\nLabel:\", element) # themes for key, value in themes_dic.items():", "(reduziert):\", len(themes_only_reduced_dic)) # schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei #", "key, value in label_dic.items(): if value < 100: #print(\\\"%s : %s\\\" % (key,", "# In[31]: # speichert train-, validation- und testset von disciplines_only in csv-Dateien with", "element in blacklist: textliste.remove(element) text = str(textliste).replace(\"'\", \"\") # Create the wordcloud object", "\"Nature_T\", \"paysage et environnement_T\", \"Systèmes_T\", \"modélisation_T\", \"géostatistiques_T\"], \"Histoire_T\" : [\"Histoire des femmes_T\", \"Histoire", "os import numpy as np import re # module for regular expression operations", "Labels mit mehr als 100 Blogbeiträgen) with open(folder+'/blogposts_per_reduced_labels_reduced.txt',\"w\", encoding=\"utf8\") as rlr: for key,", "with open(folder+'/blogposts_per_all_labels_reduced.txt',\"w\", encoding=\"utf8\") as bplr: for key, value in sorted(all_labels_reduced_dic.items()): bplr.write(\"%s : %s\\n\"", "reduziert auf Labels mit mehr als 100 Blogbeiträgen) with open(folder+'/blogposts_per_themes_only_reduced.txt',\"w\", encoding=\"utf8\") as tor:", "in sorted(reduced_labels_dic.items()): rl.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value))", "'(', ')', '[', ']', '{', '}', '/', '\\\\', '|', '_', '-', '–', '—',", "re.match(\"aes_\", tmp_ident): # 
Blog \"aes - <NAME>\" hat nur Thèmes: Histoire, Religions tmp_label3", "contemporaine_T\" : [\"Prospectives_T\", \"XIXe siècle_T\", \"XXe siècle_T\", \"1914-1918_T\", \"1918-1939_T\", \"1939-1945_T\", \"1945-1989_T\", \"1989 à", "dieser Blog aus disciplines_only entfernt def delete_blog(identset, labelset, textset): idents = [] labels", "= '../Visualisierungen' if not os.path.exists(pictures): os.makedirs(pictures) # In[32]: # Histogramm: Blogs pro all_labels", "de l'Ouest_T\"], \"Amériques_T\" : [\"Amérique latine_T\", \"Brésil_T\", \"Cône sud_T\", \"Mexique et Amérique centrale_T\",", "Label in txt-Datei # (reduced_labels reduziert auf Labels mit mehr als 100 Blogbeiträgen)", "module for regular expression operations import csv # module for csv output from", "\"aes - <NAME>\" hat nur Thèmes: Histoire, Religions tmp_label3 = document.split(\";\", 2)[1].strip() tmp_label3", "\"Histoire_D\", \"Histoire et philosophie des sciences_D\", \"Histoire des sciences sociales_D\"], \"Langue et linguistique_D\"", "textset): labellist = \", \".join(labels) textlist = \" \".join(texts) rlw.writerow([ident, labellist, textlist]) #", "== key: tmp_all_labels.append(element) tmp_themes.append(element) #print(\"\\nTheme key:\", element) elif element in value: tmp_all_labels.append(key) tmp_themes.append(key)", "und testset von themes_only in csv-Dateien with open(datasets+'/themes_only_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_to_csv:", "'r').read() for token in tokens: #if token in stopWords: if token in stopwords:", "= get_label_dic(disciplines_only_reduced) print(\"Klassen mit weniger als 100 Texten:\", len(small_classes_disciplines_only)) print(\"Auf höchste Hierarchieebene reduzierte", "\"Management et administration_D\"], \"Pluridisciplinarité_D\" : [\"Sciences sociales interdisciplinaires_D\"], \"Psychiatrie_D\" : [\"Psychiatrie_D\"], \"Psychologie_D\" :", "train and test sets import matplotlib.pyplot as plt # module for visualization from", "# convert text 
to lower-case, remove punctuation and stopwords def normalize_text(self, text): \"\"\"Bereinigt", "for visualization from wordcloud import WordCloud # module for wordclouds # In[2]: #", "X_test_do, y_train_do, y_test_do, z_train_do, z_test_do = train_test_split(textset, disciplines_only_reduced, identset, test_size = 0.20, stratify=disciplines_only_reduced,", "Blogbeiträgen) with open(folder+'/blogposts_per_disciplines_only_reduced.txt',\"w\", encoding=\"utf8\") as dor: for key, value in sorted(disciplines_only_reduced_dic.items()): dor.write(\"%s :", "= get_label_dic(all_labels_reduced) print(\"Klassen mit weniger als 100 Texten:\", len(small_classes_all_labels)) print(\"Klassen insgesammt (reduziert):\", len(all_labels_reduced_dic))", "in stopWords: if token in stopwords: continue else: words.append(token)\"\"\" #return words, vocab #return", "filnames 2) labels 3) text \"\"\" ident = [] label = [] text", "')', '[', ']', '{', '}', '/', '\\\\', '|', '_', '-', '–', '—', '­',", "train_rl.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_train_rl, y_train_rl, X_train_rl): labellist =", "[\"Psychologie appliquée_D\", \"Psychologie biologique_D\", \"Psychologie clinique_D\", \"Psychologie du développement_D\", \"Psychologie éducative_D\", \"Psychologie expérimentale_D\",", "et critique littéraires_D\", \"Littérature britannique_D\", \"Littérature romane_D\", \"Littérature_D\"], \"Management et administration_D\" : [\"Ergonomie_D\",", "reduziert) with open(folder+'/blogposts_per_theme.txt',\"w\", encoding=\"utf8\") as to: for key, value in sorted(themes_only_dic.items()): to.write(\"%s :", "internationales_D\", \"Sciences politiques_D\", \"Administration publique_D\"], \"Arts et humanités_D\" : [\"Architecture_D\", \"Arts_D\", \"Études asiatiques_D\",", "# Histogramm: Blogs pro all_labels (besser in excel visualisieren) height = list(all_labels_dic.values()) bars", "tmp_label3] tmp_label.extend(tmp_label3) #print(\"Sonderfall:\", 
tmp_ident, tmp_label) tmp_text, vocab = self.normalize_text(document.split(\";\", 3)[2]) #tmp_text = document.split(\";\",", ": [\"Âges de la vie_T\", \"Criminologie_T\", \"Démographie_T\", \"Étude des genres_T\", \"Sociologie de la", "txt-Datei # (disciplines_only reduziert auf Labels mit mehr als 100 Blogbeiträgen) with open(folder+'/blogposts_per_disciplines_only_reduced.txt',\"w\",", "the generated image: plt.imshow(wordcloud, interpolation='bilinear') plt.axis(\"off\") plt.margins(x=0, y=0) plt.show() # Save as SVG:", "tmp_label) tmp_text, vocab = self.normalize_text(document.split(\";\", 3)[2]) #tmp_text = document.split(\";\", 3)[2] #print(\"tmp_text:\", tmp_text) else:", "Sud-Est_T\", \"Extrême Orient_T\", \"Chine_T\", \"Japon_T\", \"Monde indien_T\", \"Monde persan_T\", \"Moyen-Orient_T\", \"Proche-Orient_T\"], \"Droit_T\" :", "\"modélisation_T\", \"géostatistiques_T\"], \"Histoire_T\" : [\"Histoire des femmes_T\", \"Histoire du travail_T\", \"Histoire économique_T\", \"Histoire", "nicht reduziert) with open(folder+'/blogposts_per_theme.txt',\"w\", encoding=\"utf8\") as to: for key, value in sorted(themes_only_dic.items()): to.write(\"%s", "for key, value in sorted(reduced_labels_dic.items()): rl.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\"", "= csv.writer(train_to_csv, delimiter = \";\") train_to.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in", "\"Anthropologie sociale_T\"], \"Asie_T\" : [\"Asie centrale_T\", \"Asie du Sud-Est_T\", \"Extrême Orient_T\", \"Chine_T\", \"Japon_T\",", "100: #print(\\\"%s : %s\\\" % (key, value)) small_classes.append(key) for elements in labelset: tmp_labels", "\"Économie_D\", \"Finance_D\"], \"Éducation_D\" : [\"Éducation et sciences de l'éducation_D\", \"Éducation : disciplines scientifiques_D\",", "= file # memory friendlys because doesn't load the corpus into memory! 
def", "train_test_split(textset, reduced_labels_reduced, identset, test_size = 0.20, stratify=reduced_labels_reduced, random_state=42) # In[20]: # speichert train-", "als 100 Texten:\", len(small_classes_themes_only)) print(\"Auf höchste Hierarchieebene reduzierte Themen (reduziert):\", len(themes_only_reduced_dic)) # schreibt", "train_do.writerow([ident, labellist, textlist]) with open(datasets+'/disciplines_only_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_do_csv: test_do = csv.writer(test_do_csv,", "lower-case, remove punctuation and stopwords def normalize_text(self, text): \"\"\"Bereinigt den Text: - transformiert", "Kleinschrift - löscht Satz- und Sonderzeichen\"\"\" norm_text = text.lower() # remove punctuation for", "environnement_T\", \"Systèmes_T\", \"modélisation_T\", \"géostatistiques_T\"], \"Histoire_T\" : [\"Histoire des femmes_T\", \"Histoire du travail_T\", \"Histoire", "'études', 'de', 'des', 'la', 'dict_keys'] for element in textliste: if element in blacklist:", "in sorted(disciplines_only_reduced_dic.items()): dor.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value))", "in tmp_label3] tmp_label.extend(tmp_label3) #print(\"Sonderfall:\", tmp_ident, tmp_label) tmp_text, vocab = self.normalize_text(document.split(\";\", 3)[2]) #tmp_text =", "remove_small_classes(labelset, all_labels_dic) all_labels_reduced_dic = get_label_dic(all_labels_reduced) print(\"Klassen mit weniger als 100 Texten:\", len(small_classes_all_labels)) print(\"Klassen", "= \" \".join(texts) train_to.writerow([ident, labellist, textlist]) with open(datasets+'/themes_only_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_to_csv:", "\".join(texts) train_to.writerow([ident, labellist, textlist]) with open(datasets+'/themes_only_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_to_csv: test_to =", "wordcloud = WordCloud(width=680, height=680, margin=0, background_color=\"white\").generate(text) # Display the generated image: 
plt.imshow(wordcloud, interpolation='bilinear')", "'­', '„', '“', '■', '•', '§', '$', '@', '€', '&', '%', '&', '+',", "'').strip().split(\",\") tmp_label2 = [x.strip()+'_t' for x in tmp_label2] tmp_label.extend(tmp_label2) #tmp_label = (tmp_label1 +", "# In[ ]: pictures = '../Visualisierungen' if not os.path.exists(pictures): os.makedirs(pictures) # In[32]: #", "tmp_ident, tmp_label) tmp_text, vocab = self.normalize_text(document.split(\";\", 3)[2]) #tmp_text = document.split(\";\", 3)[2] #print(\"tmp_text:\", tmp_text)", "list of word textliste=str(all_labels_dic.keys()) textliste=textliste.replace(',', '').replace(\"'\", \"\").replace('\"', '').replace(\"l'\", '').split(' ') blacklist = ['et',", "moderne_T\" : [\"Révolution française_T\", \"XVIe siècle_T\", \"XVIIe siècle_T\", \"XVIIIe siècle_T\"], \"Ethnologie_T\" : [\"Anthropologie", "pro Label in txt-Datei # (all_labels nicht reduziert) with open(folder+'/blogposts_per_all_labels.txt',\"w\", encoding=\"utf8\") as bpl:", "document.split(\";\", 3)[2].strip() #print(\"tmp_label2:\", tmp_label2) tmp_text, vocab = self.normalize_text(document.split(\";\", 4)[3]) #tmp_text = document.split(\";\", 4)[3].strip()", "str(textliste).replace(\"'\", \"\") # Create the wordcloud object wordcloud = WordCloud(width=680, height=680, margin=0, background_color=\"white\").generate(text)", "# In[18]: # schreibt filename, classes, text von reduced_labels in csv-Datei with open(datasets+'/de_labeled_corpus_reduced_labels.csv',", "l'information et de la communication_D\" : [\"Communication_D\", \"Sciences de l'information et bibliothéconomie_D\"], \"Sciences", "#print(\"\\nDiscipline key:\", element) elif element in value: tmp_all_labels.append(key) tmp_disciplines.append(key) #print(\"\\nDiscipline:\", key) else: (\"Element", "= remove_small_classes(labelset, all_labels_dic) all_labels_reduced_dic = get_label_dic(all_labels_reduced) print(\"Klassen mit weniger als 100 Texten:\", len(small_classes_all_labels))", "with 
open(folder+'/blogs_vocabulary.txt',\"w\", encoding=\"utf8\") as v: for key, value in sorted(vocab.items()): v.write(\"%s : %s\\n\"", "elements in labelset: tmp_labels = [] for element in elements: if element in", "in zip(z_test_to, y_test_to, X_test_to): labellist = \", \".join(labels) textlist = \" \".join(texts) test_to.writerow([ident,", "vie_T\", \"Criminologie_T\", \"Démographie_T\", \"Étude des genres_T\", \"Sociologie de la consommation_T\", \"Sociologie de la", "tokens = norm_text.split() vocab = {} for word in tokens: if word in", "+= 1 else: vocab[word] = 1 \"\"\" # read stopwords words = []", "is headline for i, document in enumerate(documents[1:]): tmp_ident = document.split(\";\", 1)[0] #print(\"tmp_ident:\", tmp_ident)", "\"Sociologie de la consommation_T\", \"Sociologie de la culture_T\", \"Sociologie de la santé_T\", \"Sociologie", "= reduce_labels(labelset) # In[15]: print(reduced_labels[1000]) print(themes_only[1000]) print(disciplines_only[1000]) # In[16]: # reduced_labels dic reduced_labels_dic", "\"text\"]) for ident, labels, texts in zip(identset, themes_only, textset): tow.writerow([ident, labels, texts]) #", "else: vocabulary[key] = value return ident, label, text, vocabulary # In[3]: # get", "key, value in sorted(reduced_labels_dic.items()): rl.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" %", "= text, y = labels, z = filnames X_train_rl, X_test_rl, y_train_rl, y_test_rl, z_train_rl,", "labellist, textlist]) with open(datasets+'/themes_only_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_to_csv: test_to = csv.writer(test_to_csv, delimiter", "insgesammt:\", len(all_labels_dic)) #print(all_labels_dic) count = 0 classes = list(all_labels_dic) # schreibt die Anzahl", "# Für den Blog des Archivs der Erzdiözese Salzburg (aes) wurden keine Disziplinen", "i in v] for k, v in themes_dic.items()} print(\"THEMES:\") for key, value in", "la santé_T\", \"Sociologie du travail_T\", \"Sociologie économique_T\", \"Sociologie 
urbaine_T\", \"Sport et loisirs_T\"]} themes_dic", "\"text\"]) for ident, labels, texts in zip(identset, disciplines_only, textset): dow.writerow([ident, labels, texts]) #", ": %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) # In[22]: #", "= \", \".join(labels) textlist = \" \".join(texts) test_to.writerow([ident, labellist, textlist]) # # Erstellung", "value)) print(\"%s: %s\" % (key, value)) count += value print(\"Anzahl der vergebenen Labels:\",", "to lower-case, remove punctuation and stopwords def normalize_text(self, text): \"\"\"Bereinigt den Text: -", "= csv.writer(do_csv, delimiter = \";\") dow.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in", "der Blogbeiträge pro Label (Label : Anzahl der zugehörigen Blogbeiträge)\"\"\" labelcount_dic = {}", "die Anzahl der Blogbeiträge pro Label in txt-Datei # (all_labels reduziert auf Labels", "urbaine_T\", \"Migrations_T\", \"immigrations_T\", \"minorités_T\", \"Nature_T\", \"paysage et environnement_T\", \"Systèmes_T\", \"modélisation_T\", \"géostatistiques_T\"], \"Histoire_T\" :", "stopwords words = [] stopwords = open(folder+'/german_stopwords_plain.txt', 'r').read() for token in tokens: #if", "critique littéraires_D\", \"Littérature britannique_D\", \"Littérature romane_D\", \"Littérature_D\"], \"Management et administration_D\" : [\"Ergonomie_D\", \"Travail", "\"Anthropologie politique_T\", \"Anthropologie religieuse_T\", \"Anthropologie sociale_T\"], \"Études des sciences_T\" : [\"Histoire des sciences_T\",", "soviétiques_T\", \"France_T\", \"Îles britanniques_T\", \"Italie_T\", \"Méditerranée_T\", \"Monde germanique_T\", \"Pays baltes et scandinaves_T\", \"Péninsule", "train_to.writerow([ident, labellist, textlist]) with open(datasets+'/themes_only_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_to_csv: test_to = csv.writer(test_to_csv,", "textset): if ident.startswith('aes_'): continue else: idents.append(ident) labels.append(label) texts.append(text) return 
idents, labels, texts identset,", "labelcount_dic: labelcount_dic[l] += 1 else: labelcount_dic[l] = 1 return labelcount_dic # In[6]: #", "tmp_all_labels.append(key) tmp_disciplines.append(key) #print(\"\\nDiscipline:\", key) else: (\"Element nicht gefunden:\", element) #print(\"\\ntmp_list:\", tmp_all_labels) labels.append(list(set(tmp_all_labels))) themes.append(list(set(tmp_themes)))", "expérimentale_D\", \"Psychologie pluridisciplinaire_D\", \"Psychanalyse_D\", \"Psychologie sociale_D\"], \"Sciences de la santé et de la", "encoding=\"utf-8\") as do_csv: dow = csv.writer(do_csv, delimiter = \";\") dow.writerow([\"filename\", \"classes\", \"text\"]) for", "txt-Datei # (all_labels reduziert auf Labels mit mehr als 100 Blogbeiträgen) with open(folder+'/blogposts_per_all_labels_reduced.txt',\"w\",", "norm_text.replace(char, ' ') tokens = norm_text.split() vocab = {} for word in tokens:", "') tokens = norm_text.split() vocab = {} for word in tokens: if word", "appliquée_D\", \"Théorie du langage et linguistique_D\", \"Langue et linguistique_D\"], \"Littérature_D\" : [\"Études littéraires_D\",", "sciences_T\" : [\"Histoire des sciences_T\", \"Philosophie des sciences_T\", \"Sociologie des sciences_T\"], \"Études du", "(key, value)) # In[17]: # reduced_labels reduced dic (<100) reduced_labels_reduced, small_classes_reduced_labels = remove_small_classes(reduced_labels,", "sorted(themes_only_reduced_dic.items()): tor.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) #", "element) #print(\"\\ntmp_list:\", tmp_all_labels) labels.append(list(set(tmp_all_labels))) themes.append(list(set(tmp_themes))) disciplines.append(list(set(tmp_disciplines))) #print(\"\\nnew labelset:\", labels) return labels, themes, disciplines", "\"Finance_D\"], \"Éducation_D\" : [\"Éducation et sciences de l'éducation_D\", \"Éducation : disciplines scientifiques_D\", \"Éducation", "sociales_D\"], \"Langue et linguistique_D\" : [\"Linguistique appliquée_D\", \"Théorie du langage et 
linguistique_D\", \"Langue", "else: tmp_label1 = document.split(\";\", 2)[1].strip() #print(\"tmp_label1:\", tmp_label1) tmp_label2 = document.split(\";\", 3)[2].strip() #print(\"tmp_label2:\", tmp_label2)", "Âge_T\"], \"Océanie_T\" : [\"Océanie_T\"], \"Pensée_T\" : [\"Histoire intellectuelle_T\", \"Philosophie_T\", \"Sciences cognitives_T\"], \"Préhistoire et", "with open(datasets+'/themes_only_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_to_csv: train_to = csv.writer(train_to_csv, delimiter = \";\")", "identset, labelset, textset, vocab = MyCorpus(file) # save vocabulary to file with open(folder+'/blogs_vocabulary.txt',\"w\",", "#print(\"tmp_ident:\", tmp_ident) tmp_label = [] if re.match(\"aes_\", tmp_ident): # Blog \"aes - <NAME>\"", "\".join(labels) textlist = \" \".join(texts) alw.writerow([ident, labellist, textlist]) # In[10]: # splittet all_labels", "= \" \".join(texts) test_to.writerow([ident, labellist, textlist]) # # Erstellung des Korpus disciplines_only #", "[\"Éducation et sciences de l'éducation_D\", \"Éducation : disciplines scientifiques_D\", \"Éducation spécialisée_D\"], \"Études environnementales_D\"", "key, value in themes_dic.items(): print(\"%s: %s\" % (key, value)) print(len(list(themes_dic))) # In[13]: disciplines_dic", "und testset # x = text, y = labels, z = filnames X_train_rl,", "= [] stopwords = open(folder+'/german_stopwords_plain.txt', 'r').read() for token in tokens: #if token in", "text \"\"\" ident = [] label = [] text = [] vocabulary =", "\";\") train_do.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_train_do, y_train_do, X_train_do): labellist", "y_test_do, X_test_do): labellist = \", \".join(labels) textlist = \" \".join(texts) test_do.writerow([ident, labellist, textlist])", "<NAME> # In[1]: # Imports import os import numpy as np import re", "l'Art_T\", \"Identités culturelles_T\", \"Patrimoine_T\"], \"Sociologie_T\" : [\"Âges de la vie_T\", \"Criminologie_T\", 
\"Démographie_T\", \"Étude", "[i.lower() for i in v] for k, v in disciplines_dic.items()} print(\"DISCIPLINES:\") for key,", "with open(datasets+'/de_labeled_corpus_reduced_labels.csv', 'w', newline='', encoding=\"utf-8\") as rl_csv: rlw = csv.writer(rl_csv, delimiter = \";\")", "encoding=\"utf-8\") as train_do_csv: train_do = csv.writer(train_do_csv, delimiter = \";\") train_do.writerow([\"filename\", \"classes\", \"text\"]) for", "et bibliothéconomie_D\"], \"Sciences politiques_D\" : [\"Relations internationales_D\", \"Sciences politiques_D\", \"Administration publique_D\"], \"Sociologie et", "politique_T\", \"culture et représentation_T\", \"Géographie appliquée et aménagement_T\", \"Géographie rurale_T\", \"Géographie urbaine_T\", \"Migrations_T\",", "= {} # first row is headline for i, document in enumerate(documents[1:]): tmp_ident", "\"Chine_T\", \"Japon_T\", \"Monde indien_T\", \"Monde persan_T\", \"Moyen-Orient_T\", \"Proche-Orient_T\"], \"Droit_T\" : [\"Histoire du droit_T\",", ": %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) # In[28]: #", "y_test_al, z_train_al, z_test_al = train_test_split(textset, all_labels_reduced, identset, test_size = 0.20, stratify=all_labels_reduced, random_state=42) #", "% (key, value)) print(\"%s: %s\" % (key, value)) # In[17]: # reduced_labels reduced", "#print(\"\\nLabel:\", element) # themes for key, value in themes_dic.items(): if element == key:", "for text in texts: #print(\"\\n text in iter:\", text) yield text # preprocessing", "sociaux_T\", \"Politiques et actions publiques_T\", \"Relations internationales_T\", \"Sciences politiques_T\", \"Sociologie politique_T\"], \"Études urbaines_T\"", "textset): idents = [] labels = [] texts = [] for ident, label,", "'/', '\\\\', '|', '_', '-', '–', '—', '­', '„', '“', '■', '•', '§',", "\"Études visuelles_T\", \"Histoire culturelle_T\", \"Histoire de l'Art_T\", \"Identités culturelles_T\", \"Patrimoine_T\"], \"Sociologie_T\" : [\"Âges", "Klassen, denen weniger als 100 
Blogbeiträge zugeordnet sind\"\"\" small_classes = [] reduced_labelset =", "stopwords = open(folder+'/german_stopwords_plain.txt', 'r').read() for token in tokens: #if token in stopWords: if", "\"text\"]) for ident, labels, texts in zip(z_train_al, y_train_al, X_train_al): labellist = \", \".join(labels)", "newline='', encoding=\"utf-8\") as train_do_csv: train_do = csv.writer(train_do_csv, delimiter = \";\") train_do.writerow([\"filename\", \"classes\", \"text\"])", "testset # x = text, y = labels, z = filnames X_train_al, X_test_al,", "# In[8]: all_labels_reduced, small_classes_all_labels = remove_small_classes(labelset, all_labels_dic) all_labels_reduced_dic = get_label_dic(all_labels_reduced) print(\"Klassen mit weniger", "\"Littérature romane_D\", \"Littérature_D\"], \"Management et administration_D\" : [\"Ergonomie_D\", \"Travail et relations professionnelles_D\", \"Planification", "= list(all_labels_dic) # schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei #", "into memory! 
def __iter__(self): openfile = open(self.file, 'r', encoding='utf-8') # save each document", "auxiliaires de l'Histoire_T\", \"Vie de la recherche_T\"], \"Époque contemporaine_T\" : [\"Prospectives_T\", \"XIXe siècle_T\",", "= \", \".join(labels) textlist = \" \".join(texts) train_to.writerow([ident, labellist, textlist]) with open(datasets+'/themes_only_testset.csv', 'w',", "Texten:\", len(small_classes_reduced_labels)) print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt (reduziert):\", len(reduced_labels_reduced_dic)) # schreibt die", "tmp_text, vocab = self.normalize_text(document.split(\";\", 3)[2]) #tmp_text = document.split(\";\", 3)[2] #print(\"tmp_text:\", tmp_text) else: tmp_label1", "l'information et bibliothéconomie_D\"], \"Droit_D\" : [\"Criminologie_D\", \"Droit_D\"], \"Économie_D\" : [\"Commerce et affaires_D\", \"Économie_D\",", "csv.writer(train_to_csv, delimiter = \";\") train_to.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_train_to,", "csv.writer(test_rl_csv, delimiter = \";\") test_rl.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_test_rl,", "elements in enumerate(y): tmp_all_labels = [] tmp_themes = [] tmp_disciplines = [] #print(\"\\nlabels", "a list of word textliste=str(all_labels_dic.keys()) textliste=textliste.replace(',', '').replace(\"'\", \"\").replace('\"', '').replace(\"l'\", '').split(' ') blacklist =", "print(\"Klassen mit weniger als 100 Texten:\", len(small_classes_disciplines_only)) print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt", "\"Droit_D\"], \"Économie_D\" : [\"Commerce et affaires_D\", \"Économie_D\", \"Finance_D\"], \"Éducation_D\" : [\"Éducation et sciences", "in tmp_label1] tmp_label.extend(tmp_label1) tmp_label2 = tmp_label2.lower().replace('\"', '').strip().split(\",\") tmp_label2 = [x.strip()+'_t' for x in", "\".join(labels) textlist = \" \".join(texts) train_al.writerow([ident, labellist, textlist]) with 
open(datasets+'/all_labels_testset.csv', 'w', newline='', encoding=\"utf-8\")", "l'éducation_T\"], \"Épistémologie et méthodes_T\": [\"Approches biographiques_T\", \"Approches de corpus_T\", \"enquêtes_T\", \"archives_T\", \"Archéologie_T\", \"Cartographie_T\",", "und testset # x = text, y = labels, z = filnames X_train_al,", "in sorted(themes_only_dic.items()): to.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value))", "themes for key, value in themes_dic.items(): if element == key: tmp_all_labels.append(element) tmp_themes.append(element) #print(\"\\nTheme", "textlist]) with open(datasets+'/disciplines_only_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_do_csv: test_do = csv.writer(test_do_csv, delimiter =", "pluridisciplinaire_D\", \"Psychanalyse_D\", \"Psychologie sociale_D\"], \"Sciences de la santé et de la santé publique_D\"", "von themes_only in csv-Dateien with open(datasets+'/themes_only_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_to_csv: train_to =", "with open(datasets+'/de_labeled_corpus_all_labels.csv', 'w', newline='', encoding=\"utf-8\") as al_csv: alw = csv.writer(al_csv, delimiter = \";\")", "als 100 Blogbeiträgen) with open(folder+'/blogposts_per_all_labels_reduced.txt',\"w\", encoding=\"utf8\") as bplr: for key, value in sorted(all_labels_reduced_dic.items()):", "texts in zip(identset, all_labels_reduced, textset): labellist = \", \".join(labels) textlist = \" \".join(texts)", "Anzahl der zugehörigen Blogbeiträge)\"\"\" labelcount_dic = {} #tmp_label = \", \" for label", "%s\" % (key, value)) # In[22]: # themes_only dic reduced (<100) themes_only_reduced, small_classes_themes_only", "'w', newline='', encoding=\"utf-8\") as test_do_csv: test_do = csv.writer(test_do_csv, delimiter = \";\") test_do.writerow([\"filename\", \"classes\",", "as train_do_csv: train_do = csv.writer(train_do_csv, delimiter = \";\") train_do.writerow([\"filename\", \"classes\", \"text\"]) for ident,", "in lists def 
split_csv(self, documents): \"\"\"Splittet jede Zeile des eingelesenen Dokuments in die", "in tokens: #if token in stopWords: if token in stopwords: continue else: words.append(token)\"\"\"", "value)) print(\"%s: %s\" % (key, value)) # In[27]: # disciplines_only dic reduced (<100)", "'”', \"´\", \"`\", \"’\", \"‘\", \"‚\",\"'\", '(', ')', '[', ']', '{', '}', '/',", "reduced (<100) disciplines_only_reduced, small_classes_disciplines_only = remove_small_classes(disciplines_only, disciplines_only_dic) disciplines_only_reduced_dic = get_label_dic(disciplines_only_reduced) print(\"Klassen mit weniger", "= csv.writer(train_do_csv, delimiter = \";\") train_do.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in", "vocabulary to file with open(folder+'/blogs_vocabulary.txt',\"w\", encoding=\"utf8\") as v: for key, value in sorted(vocab.items()):", "labels, texts in zip(z_test_to, y_test_to, X_test_to): labellist = \", \".join(labels) textlist = \"", "= identnumber, nace-code-list + text) documents = openfile.readlines() openfile.close() texts = self.split_csv(documents) for", "the data folder = '../Preprocessing' datasets = '../Datasets' file = folder+'/de_labeled_corpus.csv' # Hypotheses-Blogposts:", "\"Institutions politiques_T\", \"Mouvements politiques et sociaux_T\", \"Politiques et actions publiques_T\", \"Relations internationales_T\", \"Sciences", "csv.writer(train_do_csv, delimiter = \";\") train_do.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_train_do,", "der Texte # # Autorin: <NAME> # In[1]: # Imports import os import", "Sonderzeichen\"\"\" norm_text = text.lower() # remove punctuation for char in ['0', '1', '2',", "# In[1]: # Imports import os import numpy as np import re #", "def remove_small_classes(labelset, label_dic): \"\"\"Löscht die Klassen, denen weniger als 100 Blogbeiträge zugeordnet sind\"\"\"", "[\"Études urbaines_T\"], \"Europe_T\" : [\"Balkans_T\", \"Belgique_T\", \"Europe centrale et 
orientale_T\", \"Mondes russes et", "textset = delete_blog(identset, disciplines_only_reduced, textset) # In[30]: # splittet all_labels in train- und", "'—', '­', '„', '“', '■', '•', '§', '$', '@', '€', '&', '%', '&',", "with open(folder+'/blogposts_per_disciplines_only_reduced.txt',\"w\", encoding=\"utf8\") as dor: for key, value in sorted(disciplines_only_reduced_dic.items()): dor.write(\"%s : %s\\n\"", "kleine Klassen (<100) def remove_small_classes(labelset, label_dic): \"\"\"Löscht die Klassen, denen weniger als 100", "hat nur Thèmes: Histoire, Religions tmp_label3 = document.split(\";\", 2)[1].strip() tmp_label3 = tmp_label3.lower().replace('\"', '').strip().split(\",\")", "der Blogbeiträge pro Label in txt-Datei # (all_labels reduziert auf Labels mit mehr", "littéraires_D\", \"Littérature britannique_D\", \"Littérature romane_D\", \"Littérature_D\"], \"Management et administration_D\" : [\"Ergonomie_D\", \"Travail et", "de la presse_T\", \"Histoire et sociologie des médias_T\", \"Histoire et sociologie du livre_T\",", "die Anzahl der Blogbeiträge pro Label in txt-Datei # (reduced_labels nicht reduziert) with", "in tokens: if word in vocab: vocab[word] += 1 else: vocab[word] = 1", "validation- und testset von disciplines_only in csv-Dateien with open(datasets+'/disciplines_only_trainset.csv', 'w', newline='', encoding=\"utf-8\") as", "key, value in sorted(all_labels_dic.items()): bpl.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" %", "Moyen Âge_T\"], \"Océanie_T\" : [\"Océanie_T\"], \"Pensée_T\" : [\"Histoire intellectuelle_T\", \"Philosophie_T\", \"Sciences cognitives_T\"], \"Préhistoire", "\"Sciences de l'éducation_T\"], \"Épistémologie et méthodes_T\": [\"Approches biographiques_T\", \"Approches de corpus_T\", \"enquêtes_T\", \"archives_T\",", "der Blogbeiträge pro Label in txt-Datei # (themes_only nicht reduziert) with open(folder+'/blogposts_per_theme.txt',\"w\", encoding=\"utf8\")", "Klassen insgesammt:\", len(disciplines_only_dic)) # schreibt 
die Anzahl der Blogbeiträge pro Label in txt-Datei", "small_classes_themes_only = remove_small_classes(themes_only, themes_only_dic) themes_only_reduced_dic = get_label_dic(themes_only_reduced) print(\"Klassen mit weniger als 100 Texten:\",", "labellist, textlist]) with open(datasets+'/all_labels_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_al_csv: test_al = csv.writer(test_al_csv, delimiter", "\"Géographie appliquée et aménagement_T\", \"Géographie rurale_T\", \"Géographie urbaine_T\", \"Migrations_T\", \"immigrations_T\", \"minorités_T\", \"Nature_T\", \"paysage", "In[3]: # get corpus from disk identset, labelset, textset, vocab = MyCorpus(file) #", "\"Biomédecine_D\", \"Toxicomanie_D\"], \"Sciences de l'information et de la communication_D\" : [\"Communication_D\", \"Sciences de", "v] for k, v in disciplines_dic.items()} print(\"DISCIPLINES:\") for key, value in disciplines_dic.items(): print(\"%s:", "friendlys because doesn't load the corpus into memory! def __iter__(self): openfile = open(self.file,", "\"Sciences politiques_T\", \"Sociologie politique_T\"], \"Études urbaines_T\" : [\"Études urbaines_T\"], \"Europe_T\" : [\"Balkans_T\", \"Belgique_T\",", "text von disciplines_only in csv-Datei with open(datasets+'/de_labeled_corpus_disciplines_only.csv', 'w', newline='', encoding=\"utf-8\") as do_csv: dow", "einer Wortwolke # Create a list of word textliste=str(all_labels_dic.keys()) textliste=textliste.replace(',', '').replace(\"'\", \"\").replace('\"', '').replace(\"l'\",", "In[31]: # speichert train-, validation- und testset von disciplines_only in csv-Dateien with open(datasets+'/disciplines_only_trainset.csv',", "themes_only_dic = get_label_dic(themes_only) print(\"Auf höchste Hierarchieebene reduzierte Themen:\", len(themes_only_dic)) # schreibt die Anzahl", "%s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) # In[22]: # themes_only", "x = text, y = labels, z = filnames X_train_al, X_test_al, y_train_al, y_test_al,", "Salzburg (aes) 
wurden keine Disziplinen gewählt, # dementsprechend wird dieser Blog aus disciplines_only", "\"Droit_T\" : [\"Histoire du droit_T\", \"Sociologie du droit_T\"], \"Économie_T\" : [\"Développement économique_T\", \"Économie", "ident, labels, texts in zip(identset, all_labels_reduced, textset): labellist = \", \".join(labels) textlist =", "as rl_csv: rlw = csv.writer(rl_csv, delimiter = \";\") rlw.writerow([\"filename\", \"classes\", \"text\"]) for ident,", "height=680, margin=0, background_color=\"white\").generate(text) # Display the generated image: plt.imshow(wordcloud, interpolation='bilinear') plt.axis(\"off\") plt.margins(x=0, y=0)", "höchste Hierarchiestufe\"\"\" labels = [] # new y themes = [] disciplines =", "test_al = csv.writer(test_al_csv, delimiter = \";\") test_al.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts", "développement_D\" : [\"Relations internationales_D\", \"Sciences politiques_D\", \"Administration publique_D\"], \"Arts et humanités_D\" : [\"Architecture_D\",", "labelset, textset): if ident.startswith('aes_'): continue else: idents.append(ident) labels.append(label) texts.append(text) return idents, labels, texts", "textlist = \" \".join(texts) test_to.writerow([ident, labellist, textlist]) # # Erstellung des Korpus disciplines_only", "\"Europe_T\" : [\"Balkans_T\", \"Belgique_T\", \"Europe centrale et orientale_T\", \"Mondes russes et soviétiques_T\", \"France_T\",", "tmp_text) tmp_label1 = tmp_label1.lower().replace('\"', '').strip().split(\",\") tmp_label1 = [x.strip()+'_d' for x in tmp_label1] tmp_label.extend(tmp_label1)", "with open(folder+'/blogposts_per_discipline.txt',\"w\", encoding=\"utf8\") as do: for key, value in sorted(disciplines_only_dic.items()): do.write(\"%s : %s\\n\"", "In[11]: # speichert train- und testset von all_labels in csv-Dateien with open(datasets+'/all_labels_trainset.csv', 'w',", "train_test_split(textset, all_labels_reduced, identset, test_size = 0.20, stratify=all_labels_reduced, 
random_state=42) # In[11]: # speichert train-", "germanique_T\", \"Pays baltes et scandinaves_T\", \"Péninsule ibérique_T\", \"Suisse_T\"], \"Géographie_T\" : [\"Épistémologie & histoire", "encoding=\"utf8\") as tor: for key, value in sorted(themes_only_reduced_dic.items()): tor.write(\"%s : %s\\n\" % (key,", "textlist = \" \".join(texts) train_do.writerow([ident, labellist, textlist]) with open(datasets+'/disciplines_only_testset.csv', 'w', newline='', encoding=\"utf-8\") as", "environnementales_D\" : [\"Études environnementales_D\", \"Géographie_D\", \"Études urbaines_D\"], \"géographie et développement_D\" : [\"Études environnementales_D\",", "reduzierte Klassen insgesammt (reduziert):\", len(reduced_labels_reduced_dic)) # schreibt die Anzahl der Blogbeiträge pro Label", "# speichert train- und testset von all_labels in csv-Dateien with open(datasets+'/reduced_labels_trainset.csv', 'w', newline='',", "Blog aus disciplines_only entfernt def delete_blog(identset, labelset, textset): idents = [] labels =", "text = [] vocabulary = {} # first row is headline for i,", "dow = csv.writer(do_csv, delimiter = \";\") dow.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts", "text in texts: #print(\"\\n text in iter:\", text) yield text # preprocessing #==========================", "= ['et', 'du', 'études', 'de', 'des', 'la', 'dict_keys'] for element in textliste: if", "= filnames X_train_do, X_test_do, y_train_do, y_test_do, z_train_do, z_test_do = train_test_split(textset, disciplines_only_reduced, identset, test_size", "value return ident, label, text, vocabulary # In[3]: # get corpus from disk", "(besser in excel visualisieren) height = list(all_labels_dic.values()) bars = list(all_labels_dic.keys()) y_pos = np.arange(len(bars))", "for token in tokens: #if token in stopWords: if token in stopwords: continue", "print(len(textset)) # In[5]: def get_label_dic(y): \"\"\"Erstellt ein dictionary zur Anzahl der Blogbeiträge pro", "des Korpus themes_only # 
In[21]: # themes_only dic themes_only_dic = get_label_dic(themes_only) print(\"Auf höchste", "soins_D\", \"Biomédecine_D\", \"Toxicomanie_D\"], \"Sciences de l'information et de la communication_D\" : [\"Communication_D\", \"Sciences", "\";\") alw.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(identset, all_labels_reduced, textset): labellist", "x in tmp_label1] tmp_label.extend(tmp_label1) tmp_label2 = tmp_label2.lower().replace('\"', '').strip().split(\",\") tmp_label2 = [x.strip()+'_t' for x", "weniger als 100 Texten:\", len(small_classes_disciplines_only)) print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt (reduziert):\", len(disciplines_only_reduced_dic))", "textset): labellist = \", \".join(labels) textlist = \" \".join(texts) alw.writerow([ident, labellist, textlist]) #", "\", \".join(labels) textlist = \" \".join(texts) train_al.writerow([ident, labellist, textlist]) with open(datasets+'/all_labels_testset.csv', 'w', newline='',", "ident, label, text, vocabulary # In[3]: # get corpus from disk identset, labelset,", "get_label_dic(all_labels_reduced) print(\"Klassen mit weniger als 100 Texten:\", len(small_classes_all_labels)) print(\"Klassen insgesammt (reduziert):\", len(all_labels_reduced_dic)) #", "remove_small_classes(themes_only, themes_only_dic) themes_only_reduced_dic = get_label_dic(themes_only_reduced) print(\"Klassen mit weniger als 100 Texten:\", len(small_classes_themes_only)) print(\"Auf", "\"Moyen-Orient_T\", \"Proche-Orient_T\"], \"Droit_T\" : [\"Histoire du droit_T\", \"Sociologie du droit_T\"], \"Économie_T\" : [\"Développement", ": [\"Guerres_T\", \"conflits_T\", \"violence_T\", \"Génocides et massacres_T\", \"Histoire politique_T\", \"Institutions politiques_T\", \"Mouvements politiques", "politiques et sociaux_T\", \"Politiques et actions publiques_T\", \"Relations internationales_T\", \"Sciences politiques_T\", \"Sociologie politique_T\"],", "(one document = identnumber, nace-code-list + 
text) documents = openfile.readlines() openfile.close() texts =", "Klassen (<100) def remove_small_classes(labelset, label_dic): \"\"\"Löscht die Klassen, denen weniger als 100 Blogbeiträge", "print(textset[1000]) print(len(identset)) print(len(labelset)) print(len(textset)) # In[5]: def get_label_dic(y): \"\"\"Erstellt ein dictionary zur Anzahl", "\"Études du politique_T\" : [\"Guerres_T\", \"conflits_T\", \"violence_T\", \"Génocides et massacres_T\", \"Histoire politique_T\", \"Institutions", "% (key, value)) # In[9]: # schreibt filename, classes, text von all_labels in", "[] vocabulary = {} # first row is headline for i, document in", "textlist]) # # Erstellung des Korpus reduced_labels # In[12]: # themes themes_dic =", "# Preprocessing der Texte # # Autorin: <NAME> # In[1]: # Imports import", "disciplines_only # In[26]: # disciplines_only dic disciplines_only_dic = get_label_dic(disciplines_only) print(\"Auf höchste Hierarchieebene reduzierte", "culturelles_T\", \"Patrimoine_T\"], \"Sociologie_T\" : [\"Âges de la vie_T\", \"Criminologie_T\", \"Démographie_T\", \"Étude des genres_T\",", "und Sonderzeichen\"\"\" norm_text = text.lower() # remove punctuation for char in ['0', '1',", "tmp_label1 = tmp_label1.lower().replace('\"', '').strip().split(\",\") tmp_label1 = [x.strip()+'_d' for x in tmp_label1] tmp_label.extend(tmp_label1) tmp_label2", "tmp_label2 = tmp_label2.lower().replace('\"', '').strip().split(\",\") tmp_label2 = [x.strip()+'_t' for x in tmp_label2] tmp_label.extend(tmp_label2) #tmp_label", "[\"Architecture_T\", \"Études visuelles_T\", \"Histoire culturelle_T\", \"Histoire de l'Art_T\", \"Identités culturelles_T\", \"Patrimoine_T\"], \"Sociologie_T\" :", "(\"Element nicht gefunden:\", element) # discipilnes for key, value in disciplines_dic.items(): if element", "\"Philosophie_T\", \"Sciences cognitives_T\"], \"Préhistoire et antiquité_T\" : [\"Égypte ancienne_T\", \"Histoire grecque_T\", \"Histoire romaine_T\",", "self.split_csv(documents) for text 
in texts: #print(\"\\n text in iter:\", text) yield text #", "stratify=disciplines_only_reduced, random_state=42) # In[31]: # speichert train-, validation- und testset von disciplines_only in", "disciplines_only_reduced, textset) # In[30]: # splittet all_labels in train- und testset # x", "# In[9]: # schreibt filename, classes, text von all_labels in csv-Datei with open(datasets+'/de_labeled_corpus_all_labels.csv',", "{} # first row is headline for i, document in enumerate(documents[1:]): tmp_ident =", "wordcloud import WordCloud # module for wordclouds # In[2]: # Class for accessing", "et développement_D\", \"Transports_D\", \"Management et administration_D\"], \"Pluridisciplinarité_D\" : [\"Sciences sociales interdisciplinaires_D\"], \"Psychiatrie_D\" :", "visuelles_T\", \"Histoire culturelle_T\", \"Histoire de l'Art_T\", \"Identités culturelles_T\", \"Patrimoine_T\"], \"Sociologie_T\" : [\"Âges de", "in tmp_label2] tmp_label.extend(tmp_label2) #tmp_label = (tmp_label1 + tmp_label2) #print(tmp_label) tmp_label = [x.strip() for", "for k, v in disciplines_dic.items()} print(\"DISCIPLINES:\") for key, value in disciplines_dic.items(): print(\"%s: %s\"", "\"\").replace('\"', '').replace(\"l'\", '').split(' ') blacklist = ['et', 'du', 'études', 'de', 'des', 'la', 'dict_keys']", "filename;classes;text if not os.path.exists(datasets): os.makedirs(datasets) class MyCorpus(object): \"\"\" Preprocessing des Korpus \"\"\" #", "des médias_T\", \"Histoire et sociologie du livre_T\", \"Sciences de l'information_T\"], \"Langage_T\" : [\"Linguistique_T\",", "= \";\") dow.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(identset, disciplines_only, textset):", "[] #print(\"\\nlabels in y an der Stelle %s: %s\" % (i, elements)) for", "ident, labels, texts in zip(z_train_to, y_train_to, X_train_to): labellist = \", \".join(labels) textlist =", "[\"Amérique latine_T\", \"Brésil_T\", \"Cône sud_T\", \"Mexique et Amérique centrale_T\", \"Pays andins_T\", 
\"Canada_T\", \"États-Unis_T\"],", "']', '{', '}', '/', '\\\\', '|', '_', '-', '–', '—', '­', '„', '“',", "(disciplines_only nicht reduziert) with open(folder+'/blogposts_per_discipline.txt',\"w\", encoding=\"utf8\") as do: for key, value in sorted(disciplines_only_dic.items()):", "actions publiques_T\", \"Relations internationales_T\", \"Sciences politiques_T\", \"Sociologie politique_T\"], \"Études urbaines_T\" : [\"Études urbaines_T\"],", "# Hypotheses-Blogposts: filename;classes;text if not os.path.exists(datasets): os.makedirs(datasets) class MyCorpus(object): \"\"\" Preprocessing des Korpus", "save vocabulary to file with open(folder+'/blogs_vocabulary.txt',\"w\", encoding=\"utf8\") as v: for key, value in", "and preprocessing the data folder = '../Preprocessing' datasets = '../Datasets' file = folder+'/de_labeled_corpus.csv'", "tow.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(identset, themes_only, textset): tow.writerow([ident, labels,", "la famille_D\", \"Questions sociales_D\", \"Travail social_D\"]} disciplines_dic = {k.lower(): [i.lower() for i in", "In[4]: print(identset[1000]) print(labelset[1000]) print(textset[1000]) print(len(identset)) print(len(labelset)) print(len(textset)) # In[5]: def get_label_dic(y): \"\"\"Erstellt ein", "\"Arts_D\", \"Études asiatiques_D\", \"Études anciennes_D\", \"Études culturelles_D\", \"Folklore_D\", \"Humanités pluridisciplinaires_D\", \"Musique_D\", \"Philosophie_D\", \"Religions_D\"],", "train_rl = csv.writer(train_rl_csv, delimiter = \";\") train_rl.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts", "tmp_label.extend(tmp_label2) #tmp_label = (tmp_label1 + tmp_label2) #print(tmp_label) tmp_label = [x.strip() for x in", "Thèmes: Histoire, Religions tmp_label3 = document.split(\";\", 2)[1].strip() tmp_label3 = tmp_label3.lower().replace('\"', '').strip().split(\",\") tmp_label3 =", "= \", \".join(labels) textlist = \" \".join(texts) alw.writerow([ident, 
labellist, textlist]) # In[10]: #", "disciplines_only = reduce_labels(labelset) # In[15]: print(reduced_labels[1000]) print(themes_only[1000]) print(disciplines_only[1000]) # In[16]: # reduced_labels dic", "# In[29]: # Für den Blog des Archivs der Erzdiözese Salzburg (aes) wurden", "in elements: #print(\"\\nLabel:\", element) # themes for key, value in themes_dic.items(): if element", "die Anzahl der Blogbeiträge pro Label in txt-Datei # (disciplines_only nicht reduziert) with", "test_do.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_test_do, y_test_do, X_test_do): labellist =", ": [\"Histoire du droit_T\", \"Sociologie du droit_T\"], \"Économie_T\" : [\"Développement économique_T\", \"Économie politique_T\",", "von all_labels in csv-Dateien with open(datasets+'/all_labels_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_al_csv: train_al =", "tmp_disciplines = [] #print(\"\\nlabels in y an der Stelle %s: %s\" % (i,", "urbaines_T\" : [\"Études urbaines_T\"], \"Europe_T\" : [\"Balkans_T\", \"Belgique_T\", \"Europe centrale et orientale_T\", \"Mondes", "\"Psychisme_T\" : [\"Psychanalyse_T\", \"Psychologie_T\"], \"Religions_T\" : [\"Histoire des religions_T\", \"Sociologie des religions_T\"], \"Représentations_T\"", "# schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei # (reduced_labels nicht", "for key, value in sorted(themes_only_reduced_dic.items()): tor.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\"", "et philosophie des sciences_D\", \"Histoire des sciences sociales_D\"], \"Langue et linguistique_D\" : [\"Linguistique", "value)) print(\"%s: %s\" % (key, value)) # In[9]: # schreibt filename, classes, text", "with open(datasets+'/de_labeled_corpus_themes_only.csv', 'w', newline='', encoding=\"utf-8\") as to_csv: tow = csv.writer(to_csv, delimiter = \";\")", "siècle_T\", \"XVIIIe siècle_T\"], \"Ethnologie_T\" : [\"Anthropologie culturelle_T\", \"Anthropologie politique_T\", \"Anthropologie religieuse_T\", 
\"Anthropologie sociale_T\"],", "de l'éducation_D\", \"Éducation : disciplines scientifiques_D\", \"Éducation spécialisée_D\"], \"Études environnementales_D\" : [\"Études environnementales_D\",", "labellist, textlist]) # # Erstellung des Korpus disciplines_only # In[26]: # disciplines_only dic", "in excel visualisieren) height = list(all_labels_dic.values()) bars = list(all_labels_dic.keys()) y_pos = np.arange(len(bars)) #", "\"1939-1945_T\", \"1945-1989_T\", \"1989 à de nos jours_T\", \"XXIe siècle_T\"], \"Époque moderne_T\" : [\"Révolution", "als 100 Blogbeiträge zugeordnet sind\"\"\" small_classes = [] reduced_labelset = [] for key,", "y_train_al, y_test_al, z_train_al, z_test_al = train_test_split(textset, all_labels_reduced, identset, test_size = 0.20, stratify=all_labels_reduced, random_state=42)", "get_label_dic(reduced_labels) print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt:\", len(reduced_labels_dic)) # schreibt die Anzahl der", "des Archivs der Erzdiözese Salzburg (aes) wurden keine Disziplinen gewählt, # dementsprechend wird", "(<100) disciplines_only_reduced, small_classes_disciplines_only = remove_small_classes(disciplines_only, disciplines_only_dic) disciplines_only_reduced_dic = get_label_dic(disciplines_only_reduced) print(\"Klassen mit weniger als", "mit weniger als 100 Texten:\", len(small_classes_themes_only)) print(\"Auf höchste Hierarchieebene reduzierte Themen (reduziert):\", len(themes_only_reduced_dic))", "as test_rl_csv: test_rl = csv.writer(test_rl_csv, delimiter = \";\") test_rl.writerow([\"filename\", \"classes\", \"text\"]) for ident,", "for ident, labels, texts in zip(z_train_al, y_train_al, X_train_al): labellist = \", \".join(labels) textlist", "\"minorités_T\", \"Nature_T\", \"paysage et environnement_T\", \"Systèmes_T\", \"modélisation_T\", \"géostatistiques_T\"], \"Histoire_T\" : [\"Histoire des femmes_T\",", "tmp_all_labels) labels.append(list(set(tmp_all_labels))) themes.append(list(set(tmp_themes))) 
disciplines.append(list(set(tmp_disciplines))) #print(\"\\nnew labelset:\", labels) return labels, themes, disciplines reduced_labels, themes_only,", "# In[6]: # löscht kleine Klassen (<100) def remove_small_classes(labelset, label_dic): \"\"\"Löscht die Klassen,", "Blogbeiträge pro Label in txt-Datei # (reduced_labels reduziert auf Labels mit mehr als", "random_state=42) # In[11]: # speichert train- und testset von all_labels in csv-Dateien with", ": [\"Psychanalyse_T\", \"Psychologie_T\"], \"Religions_T\" : [\"Histoire des religions_T\", \"Sociologie des religions_T\"], \"Représentations_T\" :", "%s\" % (key, value)) # In[17]: # reduced_labels reduced dic (<100) reduced_labels_reduced, small_classes_reduced_labels", "in tmp_label] ident.append(tmp_ident) label.append(tmp_label) text.append(tmp_text) for key, value in vocab.items(): if key in", "'>', '^']: norm_text = norm_text.replace(char, ' ') tokens = norm_text.split() vocab = {}", "(key, value)) # In[4]: print(identset[1000]) print(labelset[1000]) print(textset[1000]) print(len(identset)) print(len(labelset)) print(len(textset)) # In[5]: def", "Korpus reduced_labels # In[12]: # themes themes_dic = {\"Afrique_T\" : [\"Afrique du nord_T\",", "print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt (reduziert):\", len(reduced_labels_reduced_dic)) # schreibt die Anzahl der", "\"Histoire grecque_T\", \"Histoire romaine_T\", \"Monde oriental_T\", \"Préhistoire_T\"], \"Psychisme_T\" : [\"Psychanalyse_T\", \"Psychologie_T\"], \"Religions_T\" :", "element in small_classes: continue else: tmp_labels.append(element) reduced_labelset.append(tmp_labels) return reduced_labelset, small_classes # # Erstellung", "\"Études urbaines_D\"], \"Histoire et archéologie_D\" : [\"Archéologie_D\", \"Histoire_D\", \"Histoire et philosophie des sciences_D\",", "\"Sport et loisirs_T\"]} themes_dic = {k.lower(): [i.lower() for i in v] for k,", "sciences_D\", \"Histoire des sciences sociales_D\"], \"Langue et linguistique_D\" : 
[\"Linguistique appliquée_D\", \"Théorie du", "small_classes_reduced_labels = remove_small_classes(reduced_labels, reduced_labels_dic) reduced_labels_reduced_dic = get_label_dic(reduced_labels_reduced) print(\"Klassen mit weniger als 100 Texten:\",", "centrale_T\", \"Afrique de l'Est_T\", \"Afrique de l'Ouest_T\"], \"Amériques_T\" : [\"Amérique latine_T\", \"Brésil_T\", \"Cône", "else: (\"Element nicht gefunden:\", element) # discipilnes for key, value in disciplines_dic.items(): if", "[\"Communication_D\", \"Sciences de l'information et bibliothéconomie_D\"], \"Sciences politiques_D\" : [\"Relations internationales_D\", \"Sciences politiques_D\",", "schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei # (reduced_labels reduziert auf", "bars = list(all_labels_dic.keys()) y_pos = np.arange(len(bars)) # Create horizontal bars plt.barh(y_pos, height) #", "\"Suisse_T\"], \"Géographie_T\" : [\"Épistémologie & histoire de la géographie_T\", \"Espace_T\", \"société et territoire_T\",", "remove_small_classes(reduced_labels, reduced_labels_dic) reduced_labels_reduced_dic = get_label_dic(reduced_labels_reduced) print(\"Klassen mit weniger als 100 Texten:\", len(small_classes_reduced_labels)) print(\"Auf", "# (all_labels nicht reduziert) with open(folder+'/blogposts_per_all_labels.txt',\"w\", encoding=\"utf8\") as bpl: for key, value in", "Listen 1) filnames 2) labels 3) text \"\"\" ident = [] label =", "self.normalize_text(document.split(\";\", 4)[3]) #tmp_text = document.split(\";\", 4)[3].strip() #print(\"tmp_text:\", tmp_text) tmp_label1 = tmp_label1.lower().replace('\"', '').strip().split(\",\") tmp_label1", "4)[3].strip() #print(\"tmp_text:\", tmp_text) tmp_label1 = tmp_label1.lower().replace('\"', '').strip().split(\",\") tmp_label1 = [x.strip()+'_d' for x in", "tmp_label2 = document.split(\";\", 3)[2].strip() #print(\"tmp_label2:\", tmp_label2) tmp_text, vocab = self.normalize_text(document.split(\";\", 4)[3]) #tmp_text =", "= value return ident, label, text, 
vocabulary # In[3]: # get corpus from", "<NAME>\" hat nur Thèmes: Histoire, Religions tmp_label3 = document.split(\";\", 2)[1].strip() tmp_label3 = tmp_label3.lower().replace('\"',", "löscht Satz- und Sonderzeichen\"\"\" norm_text = text.lower() # remove punctuation for char in", "\"Géographie rurale_T\", \"Géographie urbaine_T\", \"Migrations_T\", \"immigrations_T\", \"minorités_T\", \"Nature_T\", \"paysage et environnement_T\", \"Systèmes_T\", \"modélisation_T\",", "romaine_T\", \"Monde oriental_T\", \"Préhistoire_T\"], \"Psychisme_T\" : [\"Psychanalyse_T\", \"Psychologie_T\"], \"Religions_T\" : [\"Histoire des religions_T\",", "__iter__(self): openfile = open(self.file, 'r', encoding='utf-8') # save each document as one item", "a list (one document = identnumber, nace-code-list + text) documents = openfile.readlines() openfile.close()", "des genres_T\", \"Sociologie de la consommation_T\", \"Sociologie de la culture_T\", \"Sociologie de la", "labels) return labels, themes, disciplines reduced_labels, themes_only, disciplines_only = reduce_labels(labelset) # In[15]: print(reduced_labels[1000])", "as al_csv: alw = csv.writer(al_csv, delimiter = \";\") alw.writerow([\"filename\", \"classes\", \"text\"]) for ident,", "csv-Dateien with open(datasets+'/reduced_labels_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_rl_csv: train_rl = csv.writer(train_rl_csv, delimiter =", "# schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei # (disciplines_only reduziert", "%s\" % (key, value)) # In[27]: # disciplines_only dic reduced (<100) disciplines_only_reduced, small_classes_disciplines_only", "tmp_labels = [] for element in elements: if element in small_classes: continue else:", "et de la santé publique_D\" : [\"Éthique_D\", \"Politique et services de santé_D\", \"Sciences", "la recherche_T\"], \"Époque contemporaine_T\" : [\"Prospectives_T\", \"XIXe siècle_T\", \"XXe siècle_T\", \"1914-1918_T\", \"1918-1939_T\", \"1939-1945_T\",", "des Korpus all_labels # 
In[7]: # all_labels dic all_labels_dic = get_label_dic(labelset) print(\"Klassen insgesammt:\",", "pluridisciplinaires_D\", \"Musique_D\", \"Philosophie_D\", \"Religions_D\"], \"bibliothéconomie_D\" : [\"Communication_D\", \"Sciences de l'information et bibliothéconomie_D\"], \"Droit_D\"", "themes_only_reduced, identset, test_size = 0.20, stratify=themes_only_reduced, random_state=42) # In[25]: # speichert train-, validation-", "for key, value in themes_dic.items(): print(\"%s: %s\" % (key, value)) print(len(list(themes_dic))) # In[13]:", "X_test_rl, y_train_rl, y_test_rl, z_train_rl, z_test_rl = train_test_split(textset, reduced_labels_reduced, identset, test_size = 0.20, stratify=reduced_labels_reduced,", "label = [] text = [] vocabulary = {} # first row is", "et affaires_D\", \"Économie_D\", \"Finance_D\"], \"Éducation_D\" : [\"Éducation et sciences de l'éducation_D\", \"Éducation :", "disciplines_only dic disciplines_only_dic = get_label_dic(disciplines_only) print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt:\", len(disciplines_only_dic)) #", "= csv.writer(test_al_csv, delimiter = \";\") test_al.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in", "remove punctuation for char in ['0', '1', '2', '3', '4', '5', '6', '7',", "txt-Datei # (disciplines_only nicht reduziert) with open(folder+'/blogposts_per_discipline.txt',\"w\", encoding=\"utf8\") as do: for key, value", "element) # discipilnes for key, value in disciplines_dic.items(): if element == key: tmp_all_labels.append(element)", "output from sklearn.model_selection import train_test_split # module to split data into train and", "tmp_all_labels.append(element) tmp_themes.append(element) #print(\"\\nTheme key:\", element) elif element in value: tmp_all_labels.append(key) tmp_themes.append(key) #print(\"\\nTheme:\", key)", "open(datasets+'/disciplines_only_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_do_csv: test_do = csv.writer(test_do_csv, delimiter = 
\";\") test_do.writerow([\"filename\",", "value)) print(\"%s: %s\" % (key, value)) # In[18]: # schreibt filename, classes, text", "'.', ',', ';', ':', '!', '?', '…','·', '·', '\"', '„', '“', '”', \"´\",", "encoding='utf-8') # save each document as one item of a list (one document", "Religions tmp_label3 = document.split(\";\", 2)[1].strip() tmp_label3 = tmp_label3.lower().replace('\"', '').strip().split(\",\") tmp_label3 = [x.strip()+'_t' for", "nace-code-list + text) documents = openfile.readlines() openfile.close() texts = self.split_csv(documents) for text in", "sociologie du livre_T\", \"Sciences de l'information_T\"], \"Langage_T\" : [\"Linguistique_T\", \"Littératures_T\"], \"Moyen Âge_T\" :", "y-axis plt.yticks(y_pos, bars) # Show graphic plt.show() # Save as SVG: plt.savefig(pictures+'/Blogs_all_labels_histogram.svg', format='svg')", "In[32]: # Histogramm: Blogs pro all_labels (besser in excel visualisieren) height = list(all_labels_dic.values())", "textlist = \" \".join(texts) alw.writerow([ident, labellist, textlist]) # In[10]: # splittet all_labels in", "import re # module for regular expression operations import csv # module for", "test_do.writerow([ident, labellist, textlist]) # # Visualisierungen # In[ ]: pictures = '../Visualisierungen' if", "\"Littérature_D\" : [\"Études littéraires_D\", \"Théorie et critique littéraires_D\", \"Littérature britannique_D\", \"Littérature romane_D\", \"Littérature_D\"],", "all_label_dics in einer Wortwolke # Create a list of word textliste=str(all_labels_dic.keys()) textliste=textliste.replace(',', '').replace(\"'\",", "with open(folder+'/blogposts_per_theme.txt',\"w\", encoding=\"utf8\") as to: for key, value in sorted(themes_only_dic.items()): to.write(\"%s : %s\\n\"", "\", \".join(labels) textlist = \" \".join(texts) test_al.writerow([ident, labellist, textlist]) # # Erstellung des", "asiatiques_D\", \"Études anciennes_D\", \"Études culturelles_D\", \"Folklore_D\", \"Humanités pluridisciplinaires_D\", 
\"Musique_D\", \"Philosophie_D\", \"Religions_D\"], \"bibliothéconomie_D\" :", "train- und testset von all_labels in csv-Dateien with open(datasets+'/reduced_labels_trainset.csv', 'w', newline='', encoding=\"utf-8\") as", "von disciplines_only in csv-Datei with open(datasets+'/de_labeled_corpus_disciplines_only.csv', 'w', newline='', encoding=\"utf-8\") as do_csv: dow =", "key, value in themes_dic.items(): if element == key: tmp_all_labels.append(element) tmp_themes.append(element) #print(\"\\nTheme key:\", element)", "publique_D\"], \"Sociologie et anthropologie_D\" : [\"Anthropologie_D\", \"Études régionales_D\", \"Sociologie_D\", \"Études féministes_D\"], \"Travail social", "plt.show() # Save as SVG: plt.savefig(pictures+'/Blogs_all_labels_histogram.svg', format='svg') plt.savefig(pictures+'/Blogs_all_labels_histogram.png') # In[33]: # Visualisierung des", "y_test_rl, X_test_rl): labellist = \", \".join(labels) textlist = \" \".join(texts) test_rl.writerow([ident, labellist, textlist])", "count) # In[8]: all_labels_reduced, small_classes_all_labels = remove_small_classes(labelset, all_labels_dic) all_labels_reduced_dic = get_label_dic(all_labels_reduced) print(\"Klassen mit", "documents = openfile.readlines() openfile.close() texts = self.split_csv(documents) for text in texts: #print(\"\\n text", "' ') tokens = norm_text.split() vocab = {} for word in tokens: if", "open(datasets+'/all_labels_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_al_csv: train_al = csv.writer(train_al_csv, delimiter = \";\") train_al.writerow([\"filename\",", ": [\"Histoire intellectuelle_T\", \"Philosophie_T\", \"Sciences cognitives_T\"], \"Préhistoire et antiquité_T\" : [\"Égypte ancienne_T\", \"Histoire", "[i.lower() for i in v] for k, v in themes_dic.items()} print(\"THEMES:\") for key,", "labels, texts in zip(z_test_rl, y_test_rl, X_test_rl): labellist = \", \".join(labels) textlist = \"", "len(small_classes_reduced_labels)) print(\"Auf höchste Hierarchieebene 
reduzierte Klassen insgesammt (reduziert):\", len(reduced_labels_reduced_dic)) # schreibt die Anzahl", "train-, validation- und testset von disciplines_only in csv-Dateien with open(datasets+'/disciplines_only_trainset.csv', 'w', newline='', encoding=\"utf-8\")", "\"Préhistoire_T\"], \"Psychisme_T\" : [\"Psychanalyse_T\", \"Psychologie_T\"], \"Religions_T\" : [\"Histoire des religions_T\", \"Sociologie des religions_T\"],", "antiquité_T\" : [\"Égypte ancienne_T\", \"Histoire grecque_T\", \"Histoire romaine_T\", \"Monde oriental_T\", \"Préhistoire_T\"], \"Psychisme_T\" :", "\";\") dow.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(identset, disciplines_only, textset): dow.writerow([ident,", "of word textliste=str(all_labels_dic.keys()) textliste=textliste.replace(',', '').replace(\"'\", \"\").replace('\"', '').replace(\"l'\", '').split(' ') blacklist = ['et', 'du',", "= [] label = [] text = [] vocabulary = {} # first", "'4', '5', '6', '7', '8', '9', '.', ',', ';', ':', '!', '?', '…','·',", "1 \"\"\" # read stopwords words = [] stopwords = open(folder+'/german_stopwords_plain.txt', 'r').read() for", "sklearn.model_selection import train_test_split # module to split data into train and test sets", "'8', '9', '.', ',', ';', ':', '!', '?', '…','·', '·', '\"', '„', '“',", "(reduziert):\", len(all_labels_reduced_dic)) # schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei #", "Blogbeiträge pro Label in txt-Datei # (all_labels nicht reduziert) with open(folder+'/blogposts_per_all_labels.txt',\"w\", encoding=\"utf8\") as", "biographiques_T\", \"Approches de corpus_T\", \"enquêtes_T\", \"archives_T\", \"Archéologie_T\", \"Cartographie_T\", \"imagerie_T\", \"SIG_T\", \"Digital humanities_T\", \"Épistémologie_T\",", "\"Espace_T\", \"société et territoire_T\", \"Géographie : politique_T\", \"culture et représentation_T\", \"Géographie appliquée et", ": [\"Architecture_D\", \"Arts_D\", \"Études asiatiques_D\", \"Études anciennes_D\", 
\"Études culturelles_D\", \"Folklore_D\", \"Humanités pluridisciplinaires_D\", \"Musique_D\",", "\"\"\"Löscht die Klassen, denen weniger als 100 Blogbeiträge zugeordnet sind\"\"\" small_classes = []", "[\"Histoire intellectuelle_T\", \"Philosophie_T\", \"Sciences cognitives_T\"], \"Préhistoire et antiquité_T\" : [\"Égypte ancienne_T\", \"Histoire grecque_T\",", "\", \".join(labels) textlist = \" \".join(texts) train_rl.writerow([ident, labellist, textlist]) with open(datasets+'/reduced_labels_testset.csv', 'w', newline='',", "100 Blogbeiträgen) with open(folder+'/blogposts_per_themes_only_reduced.txt',\"w\", encoding=\"utf8\") as tor: for key, value in sorted(themes_only_reduced_dic.items()): tor.write(\"%s", "[\"Ergonomie_D\", \"Travail et relations professionnelles_D\", \"Planification et développement_D\", \"Transports_D\", \"Management et administration_D\"], \"Pluridisciplinarité_D\"", "because doesn't load the corpus into memory! def __iter__(self): openfile = open(self.file, 'r',", "labellist = \", \".join(labels) textlist = \" \".join(texts) train_to.writerow([ident, labellist, textlist]) with open(datasets+'/themes_only_testset.csv',", "get_label_dic(themes_only) print(\"Auf höchste Hierarchieebene reduzierte Themen:\", len(themes_only_dic)) # schreibt die Anzahl der Blogbeiträge", "for key, value in sorted(themes_only_dic.items()): to.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\"", "= \";\") test_do.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_test_do, y_test_do, X_test_do):", "\"Sciences politiques_D\" : [\"Relations internationales_D\", \"Sciences politiques_D\", \"Administration publique_D\"], \"Sociologie et anthropologie_D\" :", "texts in zip(z_train_rl, y_train_rl, X_train_rl): labellist = \", \".join(labels) textlist = \" \".join(texts)", "'r', encoding='utf-8') # save each document as one item of a list (one", "\"Sociologie du droit_T\"], \"Économie_T\" : [\"Développement économique_T\", \"Économie 
politique_T\", \"Gestion_T\", \"Travail_T\", \"emploi_T\"], \"Éducation_T\"", "key:\", element) elif element in value: tmp_all_labels.append(key) tmp_themes.append(key) #print(\"\\nTheme:\", key) else: (\"Element nicht", "\"Pensée_T\" : [\"Histoire intellectuelle_T\", \"Philosophie_T\", \"Sciences cognitives_T\"], \"Préhistoire et antiquité_T\" : [\"Égypte ancienne_T\",", "themes_only_dic) themes_only_reduced_dic = get_label_dic(themes_only_reduced) print(\"Klassen mit weniger als 100 Texten:\", len(small_classes_themes_only)) print(\"Auf höchste", "identnumber, nace-code-list + text) documents = openfile.readlines() openfile.close() texts = self.split_csv(documents) for text", "100 Blogbeiträgen) with open(folder+'/blogposts_per_reduced_labels_reduced.txt',\"w\", encoding=\"utf8\") as rlr: for key, value in sorted(reduced_labels_reduced_dic.items()): rlr.write(\"%s", "[\"Révolution française_T\", \"XVIe siècle_T\", \"XVIIe siècle_T\", \"XVIIIe siècle_T\"], \"Ethnologie_T\" : [\"Anthropologie culturelle_T\", \"Anthropologie", "\"Méditerranée_T\", \"Monde germanique_T\", \"Pays baltes et scandinaves_T\", \"Péninsule ibérique_T\", \"Suisse_T\"], \"Géographie_T\" : [\"Épistémologie", "= \";\") test_al.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_test_al, y_test_al, X_test_al):", "print(\"Klassen insgesammt (reduziert):\", len(all_labels_reduced_dic)) # schreibt die Anzahl der Blogbeiträge pro Label in", "= {k.lower(): [i.lower() for i in v] for k, v in disciplines_dic.items()} print(\"DISCIPLINES:\")", "schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei # (disciplines_only reduziert auf", "[\"Archéologie_D\", \"Histoire_D\", \"Histoire et philosophie des sciences_D\", \"Histoire des sciences sociales_D\"], \"Langue et", "\"géostatistiques_T\"], \"Histoire_T\" : [\"Histoire des femmes_T\", \"Histoire du travail_T\", \"Histoire économique_T\", \"Histoire industrielle_T\",", "csv.writer(train_rl_csv, delimiter = \";\") 
train_rl.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_train_rl,", "def __init__(self, file, x, y): def __init__(self, file): self.file = file # memory", "de l'information_T\"], \"Langage_T\" : [\"Linguistique_T\", \"Littératures_T\"], \"Moyen Âge_T\" : [\"Bas Moyen Âge_T\", \"Haut", "\" \".join(texts) test_do.writerow([ident, labellist, textlist]) # # Visualisierungen # In[ ]: pictures =", "print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt:\", len(reduced_labels_dic)) # schreibt die Anzahl der Blogbeiträge", "des Korpus \"\"\" # file: input data # def __init__(self, file, x, y):", "elements: if element in small_classes: continue else: tmp_labels.append(element) reduced_labelset.append(tmp_labels) return reduced_labelset, small_classes #", "environnementales_D\", \"Géographie_D\", \"Études urbaines_D\"], \"Histoire et archéologie_D\" : [\"Archéologie_D\", \"Histoire_D\", \"Histoire et philosophie", ": [\"Communication_D\", \"Sciences de l'information et bibliothéconomie_D\"], \"Sciences politiques_D\" : [\"Relations internationales_D\", \"Sciences", "%s\" % (key, value)) # In[18]: # schreibt filename, classes, text von reduced_labels", "stratify=themes_only_reduced, random_state=42) # In[25]: # speichert train-, validation- und testset von themes_only in", "len(themes_only_dic)) # schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei # (themes_only", "format='svg') plt.savefig(pictures+'/Blogs_all_labels_histogram.png') # In[33]: # Visualisierung des all_label_dics in einer Wortwolke # Create", "et sociologie des médias_T\", \"Histoire et sociologie du livre_T\", \"Sciences de l'information_T\"], \"Langage_T\"", "reduzierte Klassen insgesammt (reduziert):\", len(disciplines_only_reduced_dic)) # schreibt die Anzahl der Blogbeiträge pro Label", "all_labels_dic) all_labels_reduced_dic = get_label_dic(all_labels_reduced) print(\"Klassen mit weniger als 100 Texten:\", len(small_classes_all_labels)) 
print(\"Klassen insgesammt", "{\"Afrique_T\" : [\"Afrique du nord_T\", \"Algérie_T\", \"Afrique noire_T\", \"Afrique australe_T\", \"Afrique centrale_T\", \"Afrique", "des sciences_T\" : [\"Histoire des sciences_T\", \"Philosophie des sciences_T\", \"Sociologie des sciences_T\"], \"Études", "for k, v in themes_dic.items()} print(\"THEMES:\") for key, value in themes_dic.items(): print(\"%s: %s\"", "print(\"DISCIPLINES:\") for key, value in disciplines_dic.items(): print(\"%s: %s\" % (key, value)) print(len(list(disciplines_dic))) #", "\"classes\", \"text\"]) for ident, labels, texts in zip(z_test_to, y_test_to, X_test_to): labellist = \",", "0 classes = list(all_labels_dic) # schreibt die Anzahl der Blogbeiträge pro Label in", "sud_T\", \"Mexique et Amérique centrale_T\", \"Pays andins_T\", \"Canada_T\", \"États-Unis_T\"], \"anthropologie_T\" : [\"Anthropologie culturelle_T\",", "\"Sciences de l'information_T\"], \"Langage_T\" : [\"Linguistique_T\", \"Littératures_T\"], \"Moyen Âge_T\" : [\"Bas Moyen Âge_T\",", "in iter:\", text) yield text # preprocessing #========================== # convert text to lower-case,", ": %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) # In[18]: #", "encoding=\"utf-8\") as train_to_csv: train_to = csv.writer(train_to_csv, delimiter = \";\") train_to.writerow([\"filename\", \"classes\", \"text\"]) for", "Label in txt-Datei # (themes_only reduziert auf Labels mit mehr als 100 Blogbeiträgen)", "encoding=\"utf8\") as to: for key, value in sorted(themes_only_dic.items()): to.write(\"%s : %s\\n\" % (key,", "for ident, labels, texts in zip(z_train_do, y_train_do, X_train_do): labellist = \", \".join(labels) textlist", "document.split(\";\", 1)[0] #print(\"tmp_ident:\", tmp_ident) tmp_label = [] if re.match(\"aes_\", tmp_ident): # Blog \"aes", "pro all_labels (besser in excel visualisieren) height = list(all_labels_dic.values()) bars = list(all_labels_dic.keys()) y_pos", "urbaines_D\"], \"Histoire et archéologie_D\" : 
[\"Archéologie_D\", \"Histoire_D\", \"Histoire et philosophie des sciences_D\", \"Histoire", "tmp_all_labels.append(element) tmp_disciplines.append(element) #print(\"\\nDiscipline key:\", element) elif element in value: tmp_all_labels.append(key) tmp_disciplines.append(key) #print(\"\\nDiscipline:\", key)", "sociales interdisciplinaires_D\"], \"Psychiatrie_D\" : [\"Psychiatrie_D\"], \"Psychologie_D\" : [\"Psychologie appliquée_D\", \"Psychologie biologique_D\", \"Psychologie clinique_D\",", "y_train_do, X_train_do): labellist = \", \".join(labels) textlist = \" \".join(texts) train_do.writerow([ident, labellist, textlist])", "import csv # module for csv output from sklearn.model_selection import train_test_split # module", "\"Études environnementales_D\" : [\"Études environnementales_D\", \"Géographie_D\", \"Études urbaines_D\"], \"géographie et développement_D\" : [\"Études", "\"Psychologie_D\" : [\"Psychologie appliquée_D\", \"Psychologie biologique_D\", \"Psychologie clinique_D\", \"Psychologie du développement_D\", \"Psychologie éducative_D\",", "y: for l in label: if l in labelcount_dic: labelcount_dic[l] += 1 else:", "\"société et territoire_T\", \"Géographie : politique_T\", \"culture et représentation_T\", \"Géographie appliquée et aménagement_T\",", "value)) # In[18]: # schreibt filename, classes, text von reduced_labels in csv-Datei with", "politique_T\", \"Institutions politiques_T\", \"Mouvements politiques et sociaux_T\", \"Politiques et actions publiques_T\", \"Relations internationales_T\",", "% (key, value)) print(\"%s: %s\" % (key, value)) # In[22]: # themes_only dic", "% (key, value)) print(\"%s: %s\" % (key, value)) # In[9]: # schreibt filename,", "(key, value)) # In[22]: # themes_only dic reduced (<100) themes_only_reduced, small_classes_themes_only = remove_small_classes(themes_only,", "pro Label in txt-Datei # (reduced_labels reduziert auf Labels mit mehr als 100", "text # preprocessing #========================== # convert text to lower-case, 
remove punctuation and stopwords", "environnementales_D\", \"Géographie_D\", \"Études urbaines_D\"], \"géographie et développement_D\" : [\"Études environnementales_D\", \"Géographie_D\", \"Études urbaines_D\"],", "und testset von all_labels in csv-Dateien with open(datasets+'/reduced_labels_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_rl_csv:", "in themes_dic.items(): print(\"%s: %s\" % (key, value)) print(len(list(themes_dic))) # In[13]: disciplines_dic = {\"administration", "tmp_disciplines.append(key) #print(\"\\nDiscipline:\", key) else: (\"Element nicht gefunden:\", element) #print(\"\\ntmp_list:\", tmp_all_labels) labels.append(list(set(tmp_all_labels))) themes.append(list(set(tmp_themes))) disciplines.append(list(set(tmp_disciplines)))", "token in tokens: #if token in stopWords: if token in stopwords: continue else:", "\"XVIIe siècle_T\", \"XVIIIe siècle_T\"], \"Ethnologie_T\" : [\"Anthropologie culturelle_T\", \"Anthropologie politique_T\", \"Anthropologie religieuse_T\", \"Anthropologie", "# Visualisierung des all_label_dics in einer Wortwolke # Create a list of word", "\"Humanités pluridisciplinaires_D\", \"Musique_D\", \"Philosophie_D\", \"Religions_D\"], \"bibliothéconomie_D\" : [\"Communication_D\", \"Sciences de l'information et bibliothéconomie_D\"],", "= 0.20, stratify=reduced_labels_reduced, random_state=42) # In[20]: # speichert train- und testset von all_labels", "vocab: vocab[word] += 1 else: vocab[word] = 1 \"\"\" # read stopwords words", "value)) # In[17]: # reduced_labels reduced dic (<100) reduced_labels_reduced, small_classes_reduced_labels = remove_small_classes(reduced_labels, reduced_labels_dic)", "\"Droit_D\" : [\"Criminologie_D\", \"Droit_D\"], \"Économie_D\" : [\"Commerce et affaires_D\", \"Économie_D\", \"Finance_D\"], \"Éducation_D\" :", "= text.lower() # remove punctuation for char in ['0', '1', '2', '3', '4',", "représentation_T\", \"Méthodes qualitatives_T\", \"Méthodes quantitatives_T\", \"Sciences auxiliaires 
de l'Histoire_T\", \"Vie de la recherche_T\"],", "# module for csv output from sklearn.model_selection import train_test_split # module to split", "small_classes = [] reduced_labelset = [] for key, value in label_dic.items(): if value", "de l'éducation_T\"], \"Épistémologie et méthodes_T\": [\"Approches biographiques_T\", \"Approches de corpus_T\", \"enquêtes_T\", \"archives_T\", \"Archéologie_T\",", "Blogbeiträgen) with open(folder+'/blogposts_per_all_labels_reduced.txt',\"w\", encoding=\"utf8\") as bplr: for key, value in sorted(all_labels_reduced_dic.items()): bplr.write(\"%s :", "\"Îles britanniques_T\", \"Italie_T\", \"Méditerranée_T\", \"Monde germanique_T\", \"Pays baltes et scandinaves_T\", \"Péninsule ibérique_T\", \"Suisse_T\"],", "newline='', encoding=\"utf-8\") as train_to_csv: train_to = csv.writer(train_to_csv, delimiter = \";\") train_to.writerow([\"filename\", \"classes\", \"text\"])", "as train_al_csv: train_al = csv.writer(train_al_csv, delimiter = \";\") train_al.writerow([\"filename\", \"classes\", \"text\"]) for ident,", "train_to = csv.writer(train_to_csv, delimiter = \";\") train_to.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts", "tmp_ident): # Blog \"aes - <NAME>\" hat nur Thèmes: Histoire, Religions tmp_label3 =", "= self.split_csv(documents) for text in texts: #print(\"\\n text in iter:\", text) yield text", "encoding=\"utf-8\") as test_rl_csv: test_rl = csv.writer(test_rl_csv, delimiter = \";\") test_rl.writerow([\"filename\", \"classes\", \"text\"]) for", "\"Langue et linguistique_D\" : [\"Linguistique appliquée_D\", \"Théorie du langage et linguistique_D\", \"Langue et", "on the y-axis plt.yticks(y_pos, bars) # Show graphic plt.show() # Save as SVG:", "value)) # In[28]: # schreibt filename, classes, text von disciplines_only in csv-Datei with", "religieuse_T\", \"Anthropologie sociale_T\"], \"Études des sciences_T\" : [\"Histoire des sciences_T\", \"Philosophie des sciences_T\",", "zip(z_train_to, y_train_to, 
X_train_to): labellist = \", \".join(labels) textlist = \" \".join(texts) train_to.writerow([ident, labellist,", "\".join(texts) train_do.writerow([ident, labellist, textlist]) with open(datasets+'/disciplines_only_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_do_csv: test_do =", "höchste Hierarchieebene reduzierte Klassen insgesammt:\", len(reduced_labels_dic)) # schreibt die Anzahl der Blogbeiträge pro", "as test_to_csv: test_to = csv.writer(test_to_csv, delimiter = \";\") test_to.writerow([\"filename\", \"classes\", \"text\"]) for ident,", "+ text) documents = openfile.readlines() openfile.close() texts = self.split_csv(documents) for text in texts:", "labels, texts in zip(z_test_do, y_test_do, X_test_do): labellist = \", \".join(labels) textlist = \"", "accessing and preprocessing the data folder = '../Preprocessing' datasets = '../Datasets' file =", "labels, z = filnames X_train_do, X_test_do, y_train_do, y_test_do, z_train_do, z_test_do = train_test_split(textset, disciplines_only_reduced,", "(themes_only nicht reduziert) with open(folder+'/blogposts_per_theme.txt',\"w\", encoding=\"utf8\") as to: for key, value in sorted(themes_only_dic.items()):", "\"Histoire de l'Art_T\", \"Identités culturelles_T\", \"Patrimoine_T\"], \"Sociologie_T\" : [\"Âges de la vie_T\", \"Criminologie_T\",", "3) text \"\"\" ident = [] label = [] text = [] vocabulary", "openfile = open(self.file, 'r', encoding='utf-8') # save each document as one item of", "element in value: tmp_all_labels.append(key) tmp_disciplines.append(key) #print(\"\\nDiscipline:\", key) else: (\"Element nicht gefunden:\", element) #print(\"\\ntmp_list:\",", "encoding=\"utf-8\") as test_to_csv: test_to = csv.writer(test_to_csv, delimiter = \";\") test_to.writerow([\"filename\", \"classes\", \"text\"]) for", "dic disciplines_only_dic = get_label_dic(disciplines_only) print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt:\", len(disciplines_only_dic)) # schreibt", "texts in zip(identset, 
disciplines_only, textset): dow.writerow([ident, labels, texts]) # In[29]: # Für den", "# themes for key, value in themes_dic.items(): if element == key: tmp_all_labels.append(element) tmp_themes.append(element)", "= [] reduced_labelset = [] for key, value in label_dic.items(): if value <", "as np import re # module for regular expression operations import csv #", "in zip(identset, all_labels_reduced, textset): labellist = \", \".join(labels) textlist = \" \".join(texts) alw.writerow([ident,", "get_label_dic(labelset) print(\"Klassen insgesammt:\", len(all_labels_dic)) #print(all_labels_dic) count = 0 classes = list(all_labels_dic) # schreibt", "idents.append(ident) labels.append(label) texts.append(text) return idents, labels, texts identset, disciplines_only_reduced, textset = delete_blog(identset, disciplines_only_reduced,", "\"Époque contemporaine_T\" : [\"Prospectives_T\", \"XIXe siècle_T\", \"XXe siècle_T\", \"1914-1918_T\", \"1918-1939_T\", \"1939-1945_T\", \"1945-1989_T\", \"1989", ": [\"Études urbaines_T\"], \"Europe_T\" : [\"Balkans_T\", \"Belgique_T\", \"Europe centrale et orientale_T\", \"Mondes russes", "plt # module for visualization from wordcloud import WordCloud # module for wordclouds", "labelcount_dic[l] = 1 return labelcount_dic # In[6]: # löscht kleine Klassen (<100) def", "in themes_dic.items()} print(\"THEMES:\") for key, value in themes_dic.items(): print(\"%s: %s\" % (key, value))", "encoding=\"utf8\") as v: for key, value in sorted(vocab.items()): v.write(\"%s : %s\\n\" % (key,", "= csv.writer(train_al_csv, delimiter = \";\") train_al.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in", "disciplines reduced_labels, themes_only, disciplines_only = reduce_labels(labelset) # In[15]: print(reduced_labels[1000]) print(themes_only[1000]) print(disciplines_only[1000]) # In[16]:", "# (disciplines_only nicht reduziert) with open(folder+'/blogposts_per_discipline.txt',\"w\", encoding=\"utf8\") as do: for key, value in", "- 
transformiert alles in Kleinschrift - löscht Satz- und Sonderzeichen\"\"\" norm_text = text.lower()", "idents, labels, texts identset, disciplines_only_reduced, textset = delete_blog(identset, disciplines_only_reduced, textset) # In[30]: #", "for x in tmp_label1] tmp_label.extend(tmp_label1) tmp_label2 = tmp_label2.lower().replace('\"', '').strip().split(\",\") tmp_label2 = [x.strip()+'_t' for", "\"Afrique de l'Ouest_T\"], \"Amériques_T\" : [\"Amérique latine_T\", \"Brésil_T\", \"Cône sud_T\", \"Mexique et Amérique", "\"text\"]) for ident, labels, texts in zip(identset, reduced_labels_reduced, textset): labellist = \", \".join(labels)", "key:\", element) elif element in value: tmp_all_labels.append(key) tmp_disciplines.append(key) #print(\"\\nDiscipline:\", key) else: (\"Element nicht", "\";\") rlw.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(identset, reduced_labels_reduced, textset): labellist", "\"Gestion_T\", \"Travail_T\", \"emploi_T\"], \"Éducation_T\" : [\"Histoire de l'éducation_T\", \"Sciences de l'éducation_T\"], \"Épistémologie et", "punctuation and stopwords def normalize_text(self, text): \"\"\"Bereinigt den Text: - transformiert alles in", "Create horizontal bars plt.barh(y_pos, height) # Create names on the y-axis plt.yticks(y_pos, bars)", "os.makedirs(datasets) class MyCorpus(object): \"\"\" Preprocessing des Korpus \"\"\" # file: input data #", "\"Afrique centrale_T\", \"Afrique de l'Est_T\", \"Afrique de l'Ouest_T\"], \"Amériques_T\" : [\"Amérique latine_T\", \"Brésil_T\",", "\"Sciences auxiliaires de l'Histoire_T\", \"Vie de la recherche_T\"], \"Époque contemporaine_T\" : [\"Prospectives_T\", \"XIXe", "sciences sociales_D\"], \"Langue et linguistique_D\" : [\"Linguistique appliquée_D\", \"Théorie du langage et linguistique_D\",", "(key, value)) print(\"%s: %s\" % (key, value)) # In[23]: # schreibt filename, classes,", "test_al.writerow([ident, labellist, textlist]) # # Erstellung des Korpus reduced_labels # 
In[12]: # themes", "In[17]: # reduced_labels reduced dic (<100) reduced_labels_reduced, small_classes_reduced_labels = remove_small_classes(reduced_labels, reduced_labels_dic) reduced_labels_reduced_dic =", "#tmp_label = (tmp_label1 + tmp_label2) #print(tmp_label) tmp_label = [x.strip() for x in tmp_label]", "labelset: tmp_labels = [] for element in elements: if element in small_classes: continue", "list (one document = identnumber, nace-code-list + text) documents = openfile.readlines() openfile.close() texts", "test_size = 0.20, stratify=reduced_labels_reduced, random_state=42) # In[20]: # speichert train- und testset von", "In[6]: # löscht kleine Klassen (<100) def remove_small_classes(labelset, label_dic): \"\"\"Löscht die Klassen, denen", "WordCloud # module for wordclouds # In[2]: # Class for accessing and preprocessing", "In[2]: # Class for accessing and preprocessing the data folder = '../Preprocessing' datasets", "SVG: plt.savefig(pictures+'/Blogs_all_labels_histogram.svg', format='svg') plt.savefig(pictures+'/Blogs_all_labels_histogram.png') # In[33]: # Visualisierung des all_label_dics in einer Wortwolke", "not os.path.exists(datasets): os.makedirs(datasets) class MyCorpus(object): \"\"\" Preprocessing des Korpus \"\"\" # file: input", ": [\"Anthropologie_D\", \"Études régionales_D\", \"Sociologie_D\", \"Études féministes_D\"], \"Travail social et politique sociale_D\" :", "in txt-Datei # (reduced_labels nicht reduziert) with open(folder+'/blogposts_per_reduced_labels.txt',\"w\", encoding=\"utf8\") as rl: for key,", "get_label_dic(disciplines_only) print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt:\", len(disciplines_only_dic)) # schreibt die Anzahl der", "all_labels in csv-Dateien with open(datasets+'/all_labels_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_al_csv: train_al = csv.writer(train_al_csv,", "\"Psychologie éducative_D\", \"Psychologie expérimentale_D\", \"Psychologie pluridisciplinaire_D\", \"Psychanalyse_D\", 
\"Psychologie sociale_D\"], \"Sciences de la santé", "# Create a list of word textliste=str(all_labels_dic.keys()) textliste=textliste.replace(',', '').replace(\"'\", \"\").replace('\"', '').replace(\"l'\", '').split(' ')", "reduced_labelset, small_classes # # Erstellung des Korpus all_labels # In[7]: # all_labels dic", "# module to split data into train and test sets import matplotlib.pyplot as", "appliquée_D\", \"Psychologie biologique_D\", \"Psychologie clinique_D\", \"Psychologie du développement_D\", \"Psychologie éducative_D\", \"Psychologie expérimentale_D\", \"Psychologie", "as dor: for key, value in sorted(disciplines_only_reduced_dic.items()): dor.write(\"%s : %s\\n\" % (key, value))", "pro Label in txt-Datei # (disciplines_only nicht reduziert) with open(folder+'/blogposts_per_discipline.txt',\"w\", encoding=\"utf8\") as do:", "as test_do_csv: test_do = csv.writer(test_do_csv, delimiter = \";\") test_do.writerow([\"filename\", \"classes\", \"text\"]) for ident,", "= \";\") train_do.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_train_do, y_train_do, X_train_do):", "du nord_T\", \"Algérie_T\", \"Afrique noire_T\", \"Afrique australe_T\", \"Afrique centrale_T\", \"Afrique de l'Est_T\", \"Afrique", "et de la communication_D\" : [\"Communication_D\", \"Sciences de l'information et bibliothéconomie_D\"], \"Sciences politiques_D\"", "plt.barh(y_pos, height) # Create names on the y-axis plt.yticks(y_pos, bars) # Show graphic", "pro Label in txt-Datei # (reduced_labels nicht reduziert) with open(folder+'/blogposts_per_reduced_labels.txt',\"w\", encoding=\"utf8\") as rl:", "# schreibt filename, classes, text von all_labels in csv-Datei with open(datasets+'/de_labeled_corpus_all_labels.csv', 'w', newline='',", "open(datasets+'/themes_only_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_to_csv: test_to = csv.writer(test_to_csv, delimiter = \";\") test_to.writerow([\"filename\",", "# # Erstellung des Korpus 
disciplines_only # In[26]: # disciplines_only dic disciplines_only_dic =", "themes_only_reduced_dic = get_label_dic(themes_only_reduced) print(\"Klassen mit weniger als 100 Texten:\", len(small_classes_themes_only)) print(\"Auf höchste Hierarchieebene", "(disciplines_only reduziert auf Labels mit mehr als 100 Blogbeiträgen) with open(folder+'/blogposts_per_disciplines_only_reduced.txt',\"w\", encoding=\"utf8\") as", "textlist = \" \".join(texts) test_rl.writerow([ident, labellist, textlist]) # # Erstellung des Korpus themes_only", "reduziert) with open(folder+'/blogposts_per_discipline.txt',\"w\", encoding=\"utf8\") as do: for key, value in sorted(disciplines_only_dic.items()): do.write(\"%s :", "import os import numpy as np import re # module for regular expression", "value in sorted(reduced_labels_reduced_dic.items()): rlr.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key,", "x in tmp_label] ident.append(tmp_ident) label.append(tmp_label) text.append(tmp_text) for key, value in vocab.items(): if key", "key, value in disciplines_dic.items(): if element == key: tmp_all_labels.append(element) tmp_disciplines.append(element) #print(\"\\nDiscipline key:\", element)", "y an der Stelle %s: %s\" % (i, elements)) for element in elements:", "[\"Criminologie_D\", \"Droit_D\"], \"Économie_D\" : [\"Commerce et affaires_D\", \"Économie_D\", \"Finance_D\"], \"Éducation_D\" : [\"Éducation et", "\"Psychologie clinique_D\", \"Psychologie du développement_D\", \"Psychologie éducative_D\", \"Psychologie expérimentale_D\", \"Psychologie pluridisciplinaire_D\", \"Psychanalyse_D\", \"Psychologie", "in zip(z_test_rl, y_test_rl, X_test_rl): labellist = \", \".join(labels) textlist = \" \".join(texts) test_rl.writerow([ident,", "siècle_T\", \"XVIIe siècle_T\", \"XVIIIe siècle_T\"], \"Ethnologie_T\" : [\"Anthropologie culturelle_T\", \"Anthropologie politique_T\", \"Anthropologie religieuse_T\",", "# new y themes = [] disciplines = [] for i, elements in", "administration_D\" : 
[\"Ergonomie_D\", \"Travail et relations professionnelles_D\", \"Planification et développement_D\", \"Transports_D\", \"Management et", "value in sorted(disciplines_only_dic.items()): do.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key,", "als 100 Blogbeiträgen) with open(folder+'/blogposts_per_themes_only_reduced.txt',\"w\", encoding=\"utf8\") as tor: for key, value in sorted(themes_only_reduced_dic.items()):", "value print(\"Anzahl der vergebenen Labels:\", count) # In[8]: all_labels_reduced, small_classes_all_labels = remove_small_classes(labelset, all_labels_dic)", "# schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei # (reduced_labels reduziert", "centrale_T\", \"Asie du Sud-Est_T\", \"Extrême Orient_T\", \"Chine_T\", \"Japon_T\", \"Monde indien_T\", \"Monde persan_T\", \"Moyen-Orient_T\",", "tmp_label2) #print(tmp_label) tmp_label = [x.strip() for x in tmp_label] ident.append(tmp_ident) label.append(tmp_label) text.append(tmp_text) for", ": [\"Communication_D\", \"Sciences de l'information et bibliothéconomie_D\"], \"Droit_D\" : [\"Criminologie_D\", \"Droit_D\"], \"Économie_D\" :", "\".join(texts) test_rl.writerow([ident, labellist, textlist]) # # Erstellung des Korpus themes_only # In[21]: #", "open(folder+'/blogposts_per_themes_only_reduced.txt',\"w\", encoding=\"utf8\") as tor: for key, value in sorted(themes_only_reduced_dic.items()): tor.write(\"%s : %s\\n\" %", "= filnames X_train_to, X_test_to, y_train_to, y_test_to, z_train_to, z_test_to = train_test_split(textset, themes_only_reduced, identset, test_size", "label, text, vocabulary # In[3]: # get corpus from disk identset, labelset, textset,", "label_dic): \"\"\"Löscht die Klassen, denen weniger als 100 Blogbeiträge zugeordnet sind\"\"\" small_classes =", "\"Haut Moyen Âge_T\"], \"Océanie_T\" : [\"Océanie_T\"], \"Pensée_T\" : [\"Histoire intellectuelle_T\", \"Philosophie_T\", \"Sciences cognitives_T\"],", "texts in zip(z_test_rl, y_test_rl, X_test_rl): labellist = \", 
\".join(labels) textlist = \" \".join(texts)", "= norm_text.replace(char, ' ') tokens = norm_text.split() vocab = {} for word in", "module for csv output from sklearn.model_selection import train_test_split # module to split data", "der vergebenen Labels:\", count) # In[8]: all_labels_reduced, small_classes_all_labels = remove_small_classes(labelset, all_labels_dic) all_labels_reduced_dic =", "= \", \".join(labels) textlist = \" \".join(texts) test_do.writerow([ident, labellist, textlist]) # # Visualisierungen", "print(\"%s: %s\" % (key, value)) # In[18]: # schreibt filename, classes, text von", "& histoire de la géographie_T\", \"Espace_T\", \"société et territoire_T\", \"Géographie : politique_T\", \"culture", "la géographie_T\", \"Espace_T\", \"société et territoire_T\", \"Géographie : politique_T\", \"culture et représentation_T\", \"Géographie", "et actions publiques_T\", \"Relations internationales_T\", \"Sciences politiques_T\", \"Sociologie politique_T\"], \"Études urbaines_T\" : [\"Études", "X_train_do): labellist = \", \".join(labels) textlist = \" \".join(texts) train_do.writerow([ident, labellist, textlist]) with", "for ident, labels, texts in zip(z_train_rl, y_train_rl, X_train_rl): labellist = \", \".join(labels) textlist", "vocab #return norm_text, vocab return tokens, vocab # split identnumber, nace-code-list and corporate", "\"Pays andins_T\", \"Canada_T\", \"États-Unis_T\"], \"anthropologie_T\" : [\"Anthropologie culturelle_T\", \"Anthropologie politique_T\", \"Anthropologie religieuse_T\", \"Anthropologie", "tmp_label2] tmp_label.extend(tmp_label2) #tmp_label = (tmp_label1 + tmp_label2) #print(tmp_label) tmp_label = [x.strip() for x", "\"Information_T\" : [\"Édition électronique_T\", \"Histoire et sociologie de la presse_T\", \"Histoire et sociologie", "In[15]: print(reduced_labels[1000]) print(themes_only[1000]) print(disciplines_only[1000]) # In[16]: # reduced_labels dic reduced_labels_dic = get_label_dic(reduced_labels) print(\"Auf", "du 
politique_T\" : [\"Guerres_T\", \"conflits_T\", \"violence_T\", \"Génocides et massacres_T\", \"Histoire politique_T\", \"Institutions politiques_T\",", "\" \".join(texts) train_do.writerow([ident, labellist, textlist]) with open(datasets+'/disciplines_only_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_do_csv: test_do", "in zip(z_train_to, y_train_to, X_train_to): labellist = \", \".join(labels) textlist = \" \".join(texts) train_to.writerow([ident,", "latine_T\", \"Brésil_T\", \"Cône sud_T\", \"Mexique et Amérique centrale_T\", \"Pays andins_T\", \"Canada_T\", \"États-Unis_T\"], \"anthropologie_T\"", "gefunden:\", element) # discipilnes for key, value in disciplines_dic.items(): if element == key:", "Labels:\", count) # In[8]: all_labels_reduced, small_classes_all_labels = remove_small_classes(labelset, all_labels_dic) all_labels_reduced_dic = get_label_dic(all_labels_reduced) print(\"Klassen", "\"classes\", \"text\"]) for ident, labels, texts in zip(z_train_rl, y_train_rl, X_train_rl): labellist = \",", "\"Migrations_T\", \"immigrations_T\", \"minorités_T\", \"Nature_T\", \"paysage et environnement_T\", \"Systèmes_T\", \"modélisation_T\", \"géostatistiques_T\"], \"Histoire_T\" : [\"Histoire", "in label: if l in labelcount_dic: labelcount_dic[l] += 1 else: labelcount_dic[l] = 1", "csv-Dateien with open(datasets+'/disciplines_only_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_do_csv: train_do = csv.writer(train_do_csv, delimiter =", "in txt-Datei # (themes_only reduziert auf Labels mit mehr als 100 Blogbeiträgen) with", "key, value in sorted(themes_only_dic.items()): to.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" %", "und testset # x = text, y = labels, z = filnames X_train_do,", "for wordclouds # In[2]: # Class for accessing and preprocessing the data folder", "class MyCorpus(object): \"\"\" Preprocessing des Korpus \"\"\" # file: input data # def", "\"conflits_T\", \"violence_T\", \"Génocides et massacres_T\", \"Histoire 
politique_T\", \"Institutions politiques_T\", \"Mouvements politiques et sociaux_T\",", "\"Étude des genres_T\", \"Sociologie de la consommation_T\", \"Sociologie de la culture_T\", \"Sociologie de", "= open(self.file, 'r', encoding='utf-8') # save each document as one item of a", "for word in tokens: if word in vocab: vocab[word] += 1 else: vocab[word]", "print(\"Anzahl der vergebenen Labels:\", count) # In[8]: all_labels_reduced, small_classes_all_labels = remove_small_classes(labelset, all_labels_dic) all_labels_reduced_dic", "\"Anthropologie religieuse_T\", \"Anthropologie sociale_T\"], \"Asie_T\" : [\"Asie centrale_T\", \"Asie du Sud-Est_T\", \"Extrême Orient_T\",", "themes_only_reduced, small_classes_themes_only = remove_small_classes(themes_only, themes_only_dic) themes_only_reduced_dic = get_label_dic(themes_only_reduced) print(\"Klassen mit weniger als 100", "def reduce_labels(y): \"\"\"Reduziert die Themen und Disziplinen auf die höchste Hierarchiestufe\"\"\" labels =", "rlw = csv.writer(rl_csv, delimiter = \";\") rlw.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts", "# In[13]: disciplines_dic = {\"administration publique et développement_D\" : [\"Relations internationales_D\", \"Sciences politiques_D\",", "X_test_do): labellist = \", \".join(labels) textlist = \" \".join(texts) test_do.writerow([ident, labellist, textlist]) #", "#print(\"\\nTheme key:\", element) elif element in value: tmp_all_labels.append(key) tmp_themes.append(key) #print(\"\\nTheme:\", key) else: (\"Element", "Class for accessing and preprocessing the data folder = '../Preprocessing' datasets = '../Datasets'", "politiques_D\", \"Administration publique_D\"], \"Sociologie et anthropologie_D\" : [\"Anthropologie_D\", \"Études régionales_D\", \"Sociologie_D\", \"Études féministes_D\"],", "tmp_label] ident.append(tmp_ident) label.append(tmp_label) text.append(tmp_text) for key, value in vocab.items(): if key in vocabulary:", "highest level def reduce_labels(y): 
\"\"\"Reduziert die Themen und Disziplinen auf die höchste Hierarchiestufe\"\"\"", "1) filnames 2) labels 3) text \"\"\" ident = [] label = []", "[\"Développement économique_T\", \"Économie politique_T\", \"Gestion_T\", \"Travail_T\", \"emploi_T\"], \"Éducation_T\" : [\"Histoire de l'éducation_T\", \"Sciences", "file, x, y): def __init__(self, file): self.file = file # memory friendlys because", "'-', '–', '—', '­', '„', '“', '■', '•', '§', '$', '@', '€', '&',", "'„', '“', '■', '•', '§', '$', '@', '€', '&', '%', '&', '+', '*',", "# Erstellung des Korpus all_labels # In[7]: # all_labels dic all_labels_dic = get_label_dic(labelset)", "value in themes_dic.items(): print(\"%s: %s\" % (key, value)) print(len(list(themes_dic))) # In[13]: disciplines_dic =", "read stopwords words = [] stopwords = open(folder+'/german_stopwords_plain.txt', 'r').read() for token in tokens:", "if element in blacklist: textliste.remove(element) text = str(textliste).replace(\"'\", \"\") # Create the wordcloud", "text.lower() # remove punctuation for char in ['0', '1', '2', '3', '4', '5',", "test_rl = csv.writer(test_rl_csv, delimiter = \";\") test_rl.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts", "et scandinaves_T\", \"Péninsule ibérique_T\", \"Suisse_T\"], \"Géographie_T\" : [\"Épistémologie & histoire de la géographie_T\",", "tow = csv.writer(to_csv, delimiter = \";\") tow.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts", ": [\"Ergonomie_D\", \"Travail et relations professionnelles_D\", \"Planification et développement_D\", \"Transports_D\", \"Management et administration_D\"],", "= [x.strip()+'_d' for x in tmp_label1] tmp_label.extend(tmp_label1) tmp_label2 = tmp_label2.lower().replace('\"', '').strip().split(\",\") tmp_label2 =", "train- und testset von all_labels in csv-Dateien with open(datasets+'/all_labels_trainset.csv', 'w', newline='', encoding=\"utf-8\") as", "linguistique_D\", \"Langue et linguistique_D\"], \"Littérature_D\" : 
[\"Études littéraires_D\", \"Théorie et critique littéraires_D\", \"Littérature", "\"Travail social_D\"]} disciplines_dic = {k.lower(): [i.lower() for i in v] for k, v", "%s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) # In[23]: # schreibt", "delimiter = \";\") test_do.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_test_do, y_test_do,", "self.file = file # memory friendlys because doesn't load the corpus into memory!", "(key, value)) print(\"%s: %s\" % (key, value)) # In[22]: # themes_only dic reduced", "[] for key, value in label_dic.items(): if value < 100: #print(\\\"%s : %s\\\"", "mit weniger als 100 Texten:\", len(small_classes_reduced_labels)) print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt (reduziert):\",", "corpus into memory! def __iter__(self): openfile = open(self.file, 'r', encoding='utf-8') # save each", "]: pictures = '../Visualisierungen' if not os.path.exists(pictures): os.makedirs(pictures) # In[32]: # Histogramm: Blogs", "train_test_split # module to split data into train and test sets import matplotlib.pyplot", "# speichert train-, validation- und testset von themes_only in csv-Dateien with open(datasets+'/themes_only_trainset.csv', 'w',", "'').strip().split(\",\") tmp_label1 = [x.strip()+'_d' for x in tmp_label1] tmp_label.extend(tmp_label1) tmp_label2 = tmp_label2.lower().replace('\"', '').strip().split(\",\")", "\"Questions sociales_D\", \"Travail social_D\"]} disciplines_dic = {k.lower(): [i.lower() for i in v] for", "die Anzahl der Blogbeiträge pro Label in txt-Datei # (themes_only nicht reduziert) with", "1 else: labelcount_dic[l] = 1 return labelcount_dic # In[6]: # löscht kleine Klassen", "philosophie des sciences_D\", \"Histoire des sciences sociales_D\"], \"Langue et linguistique_D\" : [\"Linguistique appliquée_D\",", "labels, texts in zip(z_train_to, y_train_to, X_train_to): labellist = \", \".join(labels) textlist = \"", "régionales_D\", \"Sociologie_D\", \"Études 
féministes_D\"], \"Travail social et politique sociale_D\" : [\"Études des relations", "tmp_themes.append(key) #print(\"\\nTheme:\", key) else: (\"Element nicht gefunden:\", element) # discipilnes for key, value", "all_labels # In[7]: # all_labels dic all_labels_dic = get_label_dic(labelset) print(\"Klassen insgesammt:\", len(all_labels_dic)) #print(all_labels_dic)", "tmp_label3 = [x.strip()+'_t' for x in tmp_label3] tmp_label.extend(tmp_label3) #print(\"Sonderfall:\", tmp_ident, tmp_label) tmp_text, vocab", "labels, texts in zip(z_train_do, y_train_do, X_train_do): labellist = \", \".join(labels) textlist = \"", ": [\"Développement économique_T\", \"Économie politique_T\", \"Gestion_T\", \"Travail_T\", \"emploi_T\"], \"Éducation_T\" : [\"Histoire de l'éducation_T\",", "= \";\") train_to.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_train_to, y_train_to, X_train_to):", "csv-Datei with open(datasets+'/de_labeled_corpus_reduced_labels.csv', 'w', newline='', encoding=\"utf-8\") as rl_csv: rlw = csv.writer(rl_csv, delimiter =", "'w', newline='', encoding=\"utf-8\") as test_to_csv: test_to = csv.writer(test_to_csv, delimiter = \";\") test_to.writerow([\"filename\", \"classes\",", "Anzahl der Blogbeiträge pro Label (Label : Anzahl der zugehörigen Blogbeiträge)\"\"\" labelcount_dic =", "value in disciplines_dic.items(): if element == key: tmp_all_labels.append(element) tmp_disciplines.append(element) #print(\"\\nDiscipline key:\", element) elif", "Satz- und Sonderzeichen\"\"\" norm_text = text.lower() # remove punctuation for char in ['0',", "zur Anzahl der Blogbeiträge pro Label (Label : Anzahl der zugehörigen Blogbeiträge)\"\"\" labelcount_dic", "= [] if re.match(\"aes_\", tmp_ident): # Blog \"aes - <NAME>\" hat nur Thèmes:", "et de représentation_T\", \"Méthodes qualitatives_T\", \"Méthodes quantitatives_T\", \"Sciences auxiliaires de l'Histoire_T\", \"Vie de", "in sorted(reduced_labels_reduced_dic.items()): rlr.write(\"%s : %s\\n\" 
% (key, value)) print(\"%s: %s\" % (key, value))", "for key, value in sorted(disciplines_only_dic.items()): do.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\"", "# In[21]: # themes_only dic themes_only_dic = get_label_dic(themes_only) print(\"Auf höchste Hierarchieebene reduzierte Themen:\",", "norm_text, vocab return tokens, vocab # split identnumber, nace-code-list and corporate purpose and", "interdisciplinaires_D\"], \"Psychiatrie_D\" : [\"Psychiatrie_D\"], \"Psychologie_D\" : [\"Psychologie appliquée_D\", \"Psychologie biologique_D\", \"Psychologie clinique_D\", \"Psychologie", "vocabulary[key] = value return ident, label, text, vocabulary # In[3]: # get corpus", "open(datasets+'/de_labeled_corpus_reduced_labels.csv', 'w', newline='', encoding=\"utf-8\") as rl_csv: rlw = csv.writer(rl_csv, delimiter = \";\") rlw.writerow([\"filename\",", "publique et développement_D\" : [\"Relations internationales_D\", \"Sciences politiques_D\", \"Administration publique_D\"], \"Arts et humanités_D\"", "de l'éducation_T\", \"Sciences de l'éducation_T\"], \"Épistémologie et méthodes_T\": [\"Approches biographiques_T\", \"Approches de corpus_T\",", "import numpy as np import re # module for regular expression operations import", "labels 3) text \"\"\" ident = [] label = [] text = []", "= get_label_dic(disciplines_only) print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt:\", len(disciplines_only_dic)) # schreibt die Anzahl", "100 Texten:\", len(small_classes_reduced_labels)) print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt (reduziert):\", len(reduced_labels_reduced_dic)) # schreibt", "\"Anthropologie politique_T\", \"Anthropologie religieuse_T\", \"Anthropologie sociale_T\"], \"Asie_T\" : [\"Asie centrale_T\", \"Asie du Sud-Est_T\",", "médias_T\", \"Histoire et sociologie du livre_T\", \"Sciences de l'information_T\"], \"Langage_T\" : [\"Linguistique_T\", \"Littératures_T\"],", "\"classes\", \"text\"]) for ident, labels, texts in 
zip(identset, disciplines_only, textset): dow.writerow([ident, labels, texts])", "des femmes_T\", \"Histoire du travail_T\", \"Histoire économique_T\", \"Histoire industrielle_T\", \"Histoire rurale_T\", \"Histoire sociale_T\",", "open(folder+'/blogposts_per_theme.txt',\"w\", encoding=\"utf8\") as to: for key, value in sorted(themes_only_dic.items()): to.write(\"%s : %s\\n\" %", "'€', '&', '%', '&', '+', '*', '=', '#', '«', '»', '≥', '<', '>',", "= tmp_label1.lower().replace('\"', '').strip().split(\",\") tmp_label1 = [x.strip()+'_d' for x in tmp_label1] tmp_label.extend(tmp_label1) tmp_label2 =", "\".join(labels) textlist = \" \".join(texts) train_do.writerow([ident, labellist, textlist]) with open(datasets+'/disciplines_only_testset.csv', 'w', newline='', encoding=\"utf-8\")", "[\"Relations internationales_D\", \"Sciences politiques_D\", \"Administration publique_D\"], \"Arts et humanités_D\" : [\"Architecture_D\", \"Arts_D\", \"Études", "In[5]: def get_label_dic(y): \"\"\"Erstellt ein dictionary zur Anzahl der Blogbeiträge pro Label (Label", "\"Religions_D\"], \"bibliothéconomie_D\" : [\"Communication_D\", \"Sciences de l'information et bibliothéconomie_D\"], \"Droit_D\" : [\"Criminologie_D\", \"Droit_D\"],", "affaires_D\", \"Économie_D\", \"Finance_D\"], \"Éducation_D\" : [\"Éducation et sciences de l'éducation_D\", \"Éducation : disciplines", "'[', ']', '{', '}', '/', '\\\\', '|', '_', '-', '–', '—', '­', '„',", "delimiter = \";\") train_to.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_train_to, y_train_to,", "Imports import os import numpy as np import re # module for regular", "import train_test_split # module to split data into train and test sets import", "module to split data into train and test sets import matplotlib.pyplot as plt", "# (all_labels reduziert auf Labels mit mehr als 100 Blogbeiträgen) with open(folder+'/blogposts_per_all_labels_reduced.txt',\"w\", encoding=\"utf8\")", "eingelesenen Dokuments in die drei 
Listen 1) filnames 2) labels 3) text \"\"\"", "v in themes_dic.items()} print(\"THEMES:\") for key, value in themes_dic.items(): print(\"%s: %s\" % (key,", "von reduced_labels in csv-Datei with open(datasets+'/de_labeled_corpus_reduced_labels.csv', 'w', newline='', encoding=\"utf-8\") as rl_csv: rlw =", "in zip(identset, themes_only, textset): tow.writerow([ident, labels, texts]) # In[24]: # splittet all_labels in", "(key, value)) # In[28]: # schreibt filename, classes, text von disciplines_only in csv-Datei", "labels, texts]) # In[29]: # Für den Blog des Archivs der Erzdiözese Salzburg", "rlr: for key, value in sorted(reduced_labels_reduced_dic.items()): rlr.write(\"%s : %s\\n\" % (key, value)) print(\"%s:", "% (key, value)) # In[28]: # schreibt filename, classes, text von disciplines_only in", "Archivs der Erzdiözese Salzburg (aes) wurden keine Disziplinen gewählt, # dementsprechend wird dieser", "%s\\n\" % (key, value)) # In[4]: print(identset[1000]) print(labelset[1000]) print(textset[1000]) print(len(identset)) print(len(labelset)) print(len(textset)) #", "csv-Datei with open(datasets+'/de_labeled_corpus_all_labels.csv', 'w', newline='', encoding=\"utf-8\") as al_csv: alw = csv.writer(al_csv, delimiter =", "themes_dic.items(): print(\"%s: %s\" % (key, value)) print(len(list(themes_dic))) # In[13]: disciplines_dic = {\"administration publique", "#if token in stopWords: if token in stopwords: continue else: words.append(token)\"\"\" #return words,", "\"Psychiatrie_D\" : [\"Psychiatrie_D\"], \"Psychologie_D\" : [\"Psychologie appliquée_D\", \"Psychologie biologique_D\", \"Psychologie clinique_D\", \"Psychologie du", "labellist = \", \".join(labels) textlist = \" \".join(texts) rlw.writerow([ident, labellist, textlist]) # In[19]:", "Korpus themes_only # In[21]: # themes_only dic themes_only_dic = get_label_dic(themes_only) print(\"Auf höchste Hierarchieebene", "labels, texts in zip(z_train_rl, y_train_rl, X_train_rl): labellist = \", \".join(labels) textlist = 
\"", "themes_only in csv-Datei with open(datasets+'/de_labeled_corpus_themes_only.csv', 'w', newline='', encoding=\"utf-8\") as to_csv: tow = csv.writer(to_csv,", "'../Datasets' file = folder+'/de_labeled_corpus.csv' # Hypotheses-Blogposts: filename;classes;text if not os.path.exists(datasets): os.makedirs(datasets) class MyCorpus(object):", "testset von all_labels in csv-Dateien with open(datasets+'/all_labels_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_al_csv: train_al", "\"Génocides et massacres_T\", \"Histoire politique_T\", \"Institutions politiques_T\", \"Mouvements politiques et sociaux_T\", \"Politiques et", "in stopwords: continue else: words.append(token)\"\"\" #return words, vocab #return norm_text, vocab return tokens,", "text): \"\"\"Bereinigt den Text: - transformiert alles in Kleinschrift - löscht Satz- und", "denen weniger als 100 Blogbeiträge zugeordnet sind\"\"\" small_classes = [] reduced_labelset = []", "#print(\\\"%s : %s\\\" % (key, value)) small_classes.append(key) for elements in labelset: tmp_labels =", "\"Psychologie du développement_D\", \"Psychologie éducative_D\", \"Psychologie expérimentale_D\", \"Psychologie pluridisciplinaire_D\", \"Psychanalyse_D\", \"Psychologie sociale_D\"], \"Sciences", "= [x.strip()+'_t' for x in tmp_label2] tmp_label.extend(tmp_label2) #tmp_label = (tmp_label1 + tmp_label2) #print(tmp_label)", "Erstellung des Korpus all_labels # In[7]: # all_labels dic all_labels_dic = get_label_dic(labelset) print(\"Klassen", "\"Épistémologie et méthodes_T\": [\"Approches biographiques_T\", \"Approches de corpus_T\", \"enquêtes_T\", \"archives_T\", \"Archéologie_T\", \"Cartographie_T\", \"imagerie_T\",", "%s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) # In[27]: # disciplines_only", "ident, labels, texts in zip(z_test_do, y_test_do, X_test_do): labellist = \", \".join(labels) textlist =", "\"Italie_T\", \"Méditerranée_T\", \"Monde germanique_T\", \"Pays baltes et scandinaves_T\", \"Péninsule 
ibérique_T\", \"Suisse_T\"], \"Géographie_T\" :", "Blog \"aes - <NAME>\" hat nur Thèmes: Histoire, Religions tmp_label3 = document.split(\";\", 2)[1].strip()", "horizontal bars plt.barh(y_pos, height) # Create names on the y-axis plt.yticks(y_pos, bars) #", "document = identnumber, nace-code-list + text) documents = openfile.readlines() openfile.close() texts = self.split_csv(documents)", "\"Études régionales_D\", \"Sociologie_D\", \"Études féministes_D\"], \"Travail social et politique sociale_D\" : [\"Études des", "sociologie des médias_T\", \"Histoire et sociologie du livre_T\", \"Sciences de l'information_T\"], \"Langage_T\" :", "norm_text = norm_text.replace(char, ' ') tokens = norm_text.split() vocab = {} for word", "#print(\"\\nDiscipline:\", key) else: (\"Element nicht gefunden:\", element) #print(\"\\ntmp_list:\", tmp_all_labels) labels.append(list(set(tmp_all_labels))) themes.append(list(set(tmp_themes))) disciplines.append(list(set(tmp_disciplines))) #print(\"\\nnew", "token in stopWords: if token in stopwords: continue else: words.append(token)\"\"\" #return words, vocab", "Blog des Archivs der Erzdiözese Salzburg (aes) wurden keine Disziplinen gewählt, # dementsprechend", "reduced_labels_reduced, textset): labellist = \", \".join(labels) textlist = \" \".join(texts) rlw.writerow([ident, labellist, textlist])", "'#', '«', '»', '≥', '<', '>', '^']: norm_text = norm_text.replace(char, ' ') tokens", "as tor: for key, value in sorted(themes_only_reduced_dic.items()): tor.write(\"%s : %s\\n\" % (key, value))", "file: input data # def __init__(self, file, x, y): def __init__(self, file): self.file", "ident.append(tmp_ident) label.append(tmp_label) text.append(tmp_text) for key, value in vocab.items(): if key in vocabulary: vocabulary[key]", "'').split(' ') blacklist = ['et', 'du', 'études', 'de', 'des', 'la', 'dict_keys'] for element", "In[9]: # schreibt filename, classes, text von all_labels in csv-Datei with 
open(datasets+'/de_labeled_corpus_all_labels.csv', 'w',", "électronique_T\", \"Histoire et sociologie de la presse_T\", \"Histoire et sociologie des médias_T\", \"Histoire", "Anzahl der Blogbeiträge pro Label in txt-Datei # (disciplines_only nicht reduziert) with open(folder+'/blogposts_per_discipline.txt',\"w\",", "# Visualisierungen # In[ ]: pictures = '../Visualisierungen' if not os.path.exists(pictures): os.makedirs(pictures) #", "culture_T\", \"Sociologie de la santé_T\", \"Sociologie du travail_T\", \"Sociologie économique_T\", \"Sociologie urbaine_T\", \"Sport", "value in sorted(themes_only_dic.items()): to.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key,", "csv.writer(do_csv, delimiter = \";\") dow.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(identset,", "politiques_T\", \"Mouvements politiques et sociaux_T\", \"Politiques et actions publiques_T\", \"Relations internationales_T\", \"Sciences politiques_T\",", "+= value print(\"Anzahl der vergebenen Labels:\", count) # In[8]: all_labels_reduced, small_classes_all_labels = remove_small_classes(labelset,", "schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei # (disciplines_only nicht reduziert)", "interpolation='bilinear') plt.axis(\"off\") plt.margins(x=0, y=0) plt.show() # Save as SVG: plt.savefig(pictures+'/Blogs_per_all_labels_wordcloud.svg', format='svg') plt.savefig(pictures+'/Blogs_per_all_labels_wordcloud.png', format='png')", "et services de santé_D\", \"Sciences et pratiques des soins_D\", \"Biomédecine_D\", \"Toxicomanie_D\"], \"Sciences de", "\"Afrique de l'Est_T\", \"Afrique de l'Ouest_T\"], \"Amériques_T\" : [\"Amérique latine_T\", \"Brésil_T\", \"Cône sud_T\",", "if not os.path.exists(datasets): os.makedirs(datasets) class MyCorpus(object): \"\"\" Preprocessing des Korpus \"\"\" # file:", "\"Canada_T\", \"États-Unis_T\"], \"anthropologie_T\" : [\"Anthropologie culturelle_T\", \"Anthropologie politique_T\", \"Anthropologie religieuse_T\", 
\"Anthropologie sociale_T\"], \"Asie_T\"", "sciences_T\"], \"Études du politique_T\" : [\"Guerres_T\", \"conflits_T\", \"violence_T\", \"Génocides et massacres_T\", \"Histoire politique_T\",", "'w', newline='', encoding=\"utf-8\") as train_do_csv: train_do = csv.writer(train_do_csv, delimiter = \";\") train_do.writerow([\"filename\", \"classes\",", "'%', '&', '+', '*', '=', '#', '«', '»', '≥', '<', '>', '^']: norm_text", "open(folder+'/blogposts_per_reduced_labels.txt',\"w\", encoding=\"utf8\") as rl: for key, value in sorted(reduced_labels_dic.items()): rl.write(\"%s : %s\\n\" %", "\"text\"]) for ident, labels, texts in zip(z_train_do, y_train_do, X_train_do): labellist = \", \".join(labels)", "(key, value)) print(\"%s: %s\" % (key, value)) # In[18]: # schreibt filename, classes,", "for ident, labels, texts in zip(z_test_al, y_test_al, X_test_al): labellist = \", \".join(labels) textlist", "key) else: (\"Element nicht gefunden:\", element) # discipilnes for key, value in disciplines_dic.items():", "mit weniger als 100 Texten:\", len(small_classes_disciplines_only)) print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt (reduziert):\",", "as SVG: plt.savefig(pictures+'/Blogs_all_labels_histogram.svg', format='svg') plt.savefig(pictures+'/Blogs_all_labels_histogram.png') # In[33]: # Visualisierung des all_label_dics in einer", "stopwords: continue else: words.append(token)\"\"\" #return words, vocab #return norm_text, vocab return tokens, vocab", "\"Patrimoine_T\"], \"Sociologie_T\" : [\"Âges de la vie_T\", \"Criminologie_T\", \"Démographie_T\", \"Étude des genres_T\", \"Sociologie", "labellist, textlist]) with open(datasets+'/disciplines_only_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_do_csv: test_do = csv.writer(test_do_csv, delimiter", "%s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) # In[28]: # schreibt", "labelset:\", labels) return labels, themes, disciplines reduced_labels, themes_only, disciplines_only = 
reduce_labels(labelset) # In[15]:", "# In[24]: # splittet all_labels in train- und testset # x = text,", "matplotlib.pyplot as plt # module for visualization from wordcloud import WordCloud # module", "yield text # preprocessing #========================== # convert text to lower-case, remove punctuation and", "\"Théorie et critique littéraires_D\", \"Littérature britannique_D\", \"Littérature romane_D\", \"Littérature_D\"], \"Management et administration_D\" :", "punctuation for char in ['0', '1', '2', '3', '4', '5', '6', '7', '8',", "open(datasets+'/disciplines_only_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_do_csv: train_do = csv.writer(train_do_csv, delimiter = \";\") train_do.writerow([\"filename\",", "nur Thèmes: Histoire, Religions tmp_label3 = document.split(\";\", 2)[1].strip() tmp_label3 = tmp_label3.lower().replace('\"', '').strip().split(\",\") tmp_label3", "= \" \".join(texts) alw.writerow([ident, labellist, textlist]) # In[10]: # splittet all_labels in train-", "[] label = [] text = [] vocabulary = {} # first row", "in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.', ',',", "textlist = \" \".join(texts) train_to.writerow([ident, labellist, textlist]) with open(datasets+'/themes_only_testset.csv', 'w', newline='', encoding=\"utf-8\") as", "for key, value in sorted(all_labels_reduced_dic.items()): bplr.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\"", "\" \".join(texts) alw.writerow([ident, labellist, textlist]) # In[10]: # splittet all_labels in train- und", "\"Cartographie_T\", \"imagerie_T\", \"SIG_T\", \"Digital humanities_T\", \"Épistémologie_T\", \"Historiographie_T\", \"Méthodes de traitement et de représentation_T\",", "'…','·', '·', '\"', '„', '“', '”', \"´\", \"`\", \"’\", \"‘\", \"‚\",\"'\", '(', ')',", "rurale_T\", \"Histoire sociale_T\", \"Histoire urbaine_T\"], \"Information_T\" : [\"Édition électronique_T\", \"Histoire et sociologie de", "#print(\"\\ntmp_list:\", tmp_all_labels) 
labels.append(list(set(tmp_all_labels))) themes.append(list(set(tmp_themes))) disciplines.append(list(set(tmp_disciplines))) #print(\"\\nnew labelset:\", labels) return labels, themes, disciplines reduced_labels,", "tmp_text, vocab = self.normalize_text(document.split(\";\", 4)[3]) #tmp_text = document.split(\";\", 4)[3].strip() #print(\"tmp_text:\", tmp_text) tmp_label1 =", "themes themes_dic = {\"Afrique_T\" : [\"Afrique du nord_T\", \"Algérie_T\", \"Afrique noire_T\", \"Afrique australe_T\",", "if element == key: tmp_all_labels.append(element) tmp_themes.append(element) #print(\"\\nTheme key:\", element) elif element in value:", "print(\"Klassen mit weniger als 100 Texten:\", len(small_classes_themes_only)) print(\"Auf höchste Hierarchieebene reduzierte Themen (reduziert):\",", "if l in labelcount_dic: labelcount_dic[l] += 1 else: labelcount_dic[l] = 1 return labelcount_dic", "\"Éducation_T\" : [\"Histoire de l'éducation_T\", \"Sciences de l'éducation_T\"], \"Épistémologie et méthodes_T\": [\"Approches biographiques_T\",", "value)) small_classes.append(key) for elements in labelset: tmp_labels = [] for element in elements:", ": [\"Égypte ancienne_T\", \"Histoire grecque_T\", \"Histoire romaine_T\", \"Monde oriental_T\", \"Préhistoire_T\"], \"Psychisme_T\" : [\"Psychanalyse_T\",", "one item of a list (one document = identnumber, nace-code-list + text) documents", "\"Histoire culturelle_T\", \"Histoire de l'Art_T\", \"Identités culturelles_T\", \"Patrimoine_T\"], \"Sociologie_T\" : [\"Âges de la", "csv-Datei with open(datasets+'/de_labeled_corpus_disciplines_only.csv', 'w', newline='', encoding=\"utf-8\") as do_csv: dow = csv.writer(do_csv, delimiter =", "textlist]) # # Visualisierungen # In[ ]: pictures = '../Visualisierungen' if not os.path.exists(pictures):", "all_labels_reduced, textset): labellist = \", \".join(labels) textlist = \" \".join(texts) alw.writerow([ident, labellist, textlist])", "de l'Art_T\", \"Identités culturelles_T\", \"Patrimoine_T\"], 
\"Sociologie_T\" : [\"Âges de la vie_T\", \"Criminologie_T\", \"Démographie_T\",", "reduzierte Themen:\", len(themes_only_dic)) # schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei", "Visualisierungen # In[ ]: pictures = '../Visualisierungen' if not os.path.exists(pictures): os.makedirs(pictures) # In[32]:", "\"Histoire urbaine_T\"], \"Information_T\" : [\"Édition électronique_T\", \"Histoire et sociologie de la presse_T\", \"Histoire", "texts = self.split_csv(documents) for text in texts: #print(\"\\n text in iter:\", text) yield", "delimiter = \";\") train_rl.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_train_rl, y_train_rl,", "x in tmp_label2] tmp_label.extend(tmp_label2) #tmp_label = (tmp_label1 + tmp_label2) #print(tmp_label) tmp_label = [x.strip()", "in csv-Datei with open(datasets+'/de_labeled_corpus_themes_only.csv', 'w', newline='', encoding=\"utf-8\") as to_csv: tow = csv.writer(to_csv, delimiter", "texts in zip(z_train_to, y_train_to, X_train_to): labellist = \", \".join(labels) textlist = \" \".join(texts)", "delimiter = \";\") train_do.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_train_do, y_train_do,", "{} #tmp_label = \", \" for label in y: for l in label:", "csv.writer(al_csv, delimiter = \";\") alw.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(identset,", "\", \".join(labels) textlist = \" \".join(texts) alw.writerow([ident, labellist, textlist]) # In[10]: # splittet", ": [\"Architecture_T\", \"Études visuelles_T\", \"Histoire culturelle_T\", \"Histoire de l'Art_T\", \"Identités culturelles_T\", \"Patrimoine_T\"], \"Sociologie_T\"", "in v] for k, v in disciplines_dic.items()} print(\"DISCIPLINES:\") for key, value in disciplines_dic.items():", "noire_T\", \"Afrique australe_T\", \"Afrique centrale_T\", \"Afrique de l'Est_T\", \"Afrique de l'Ouest_T\"], \"Amériques_T\" :", "file): self.file = file # memory friendlys because doesn't 
load the corpus into", "\"Sciences et pratiques des soins_D\", \"Biomédecine_D\", \"Toxicomanie_D\"], \"Sciences de l'information et de la", "langage et linguistique_D\", \"Langue et linguistique_D\"], \"Littérature_D\" : [\"Études littéraires_D\", \"Théorie et critique", "test_al.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_test_al, y_test_al, X_test_al): labellist =", "i, elements in enumerate(y): tmp_all_labels = [] tmp_themes = [] tmp_disciplines = []", "key, value in sorted(disciplines_only_reduced_dic.items()): dor.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" %", "Klassen insgesammt (reduziert):\", len(reduced_labels_reduced_dic)) # schreibt die Anzahl der Blogbeiträge pro Label in", "# reduced_labels dic reduced_labels_dic = get_label_dic(reduced_labels) print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt:\", len(reduced_labels_dic))", "# In[5]: def get_label_dic(y): \"\"\"Erstellt ein dictionary zur Anzahl der Blogbeiträge pro Label", "ident, label, text in zip(identset, labelset, textset): if ident.startswith('aes_'): continue else: idents.append(ident) labels.append(label)", "expression operations import csv # module for csv output from sklearn.model_selection import train_test_split", "continue else: words.append(token)\"\"\" #return words, vocab #return norm_text, vocab return tokens, vocab #", ": [\"Sciences sociales interdisciplinaires_D\"], \"Psychiatrie_D\" : [\"Psychiatrie_D\"], \"Psychologie_D\" : [\"Psychologie appliquée_D\", \"Psychologie biologique_D\",", "with open(folder+'/blogposts_per_reduced_labels_reduced.txt',\"w\", encoding=\"utf8\") as rlr: for key, value in sorted(reduced_labels_reduced_dic.items()): rlr.write(\"%s : %s\\n\"", "texts.append(text) return idents, labels, texts identset, disciplines_only_reduced, textset = delete_blog(identset, disciplines_only_reduced, textset) #", "delimiter = \";\") alw.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts 
in zip(identset, all_labels_reduced,", "die Themen und Disziplinen auf die höchste Hierarchiestufe\"\"\" labels = [] # new", "X_test_to, y_train_to, y_test_to, z_train_to, z_test_to = train_test_split(textset, themes_only_reduced, identset, test_size = 0.20, stratify=themes_only_reduced,", "relations professionnelles_D\", \"Planification et développement_D\", \"Transports_D\", \"Management et administration_D\"], \"Pluridisciplinarité_D\" : [\"Sciences sociales", "filename, classes, text von reduced_labels in csv-Datei with open(datasets+'/de_labeled_corpus_reduced_labels.csv', 'w', newline='', encoding=\"utf-8\") as", "auf Labels mit mehr als 100 Blogbeiträgen) with open(folder+'/blogposts_per_reduced_labels_reduced.txt',\"w\", encoding=\"utf8\") as rlr: for", "keine Disziplinen gewählt, # dementsprechend wird dieser Blog aus disciplines_only entfernt def delete_blog(identset,", "\"`\", \"’\", \"‘\", \"‚\",\"'\", '(', ')', '[', ']', '{', '}', '/', '\\\\', '|',", "def split_csv(self, documents): \"\"\"Splittet jede Zeile des eingelesenen Dokuments in die drei Listen", "labelset, textset, vocab = MyCorpus(file) # save vocabulary to file with open(folder+'/blogs_vocabulary.txt',\"w\", encoding=\"utf8\")", "reduced_labels, themes_only, disciplines_only = reduce_labels(labelset) # In[15]: print(reduced_labels[1000]) print(themes_only[1000]) print(disciplines_only[1000]) # In[16]: #", "in txt-Datei # (all_labels nicht reduziert) with open(folder+'/blogposts_per_all_labels.txt',\"w\", encoding=\"utf8\") as bpl: for key,", "themes_only in csv-Dateien with open(datasets+'/themes_only_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_to_csv: train_to = csv.writer(train_to_csv,", "themes_only, textset): tow.writerow([ident, labels, texts]) # In[24]: # splittet all_labels in train- und", "= text, y = labels, z = filnames X_train_al, X_test_al, y_train_al, y_test_al, z_train_al,", "Create a list of word textliste=str(all_labels_dic.keys()) 
textliste=textliste.replace(',', '').replace(\"'\", \"\").replace('\"', '').replace(\"l'\", '').split(' ') blacklist", "zip(identset, disciplines_only, textset): dow.writerow([ident, labels, texts]) # In[29]: # Für den Blog des", "labels.append(label) texts.append(text) return idents, labels, texts identset, disciplines_only_reduced, textset = delete_blog(identset, disciplines_only_reduced, textset)", "ident, labels, texts in zip(z_test_al, y_test_al, X_test_al): labellist = \", \".join(labels) textlist =", "méthodes_T\": [\"Approches biographiques_T\", \"Approches de corpus_T\", \"enquêtes_T\", \"archives_T\", \"Archéologie_T\", \"Cartographie_T\", \"imagerie_T\", \"SIG_T\", \"Digital", "themes, disciplines reduced_labels, themes_only, disciplines_only = reduce_labels(labelset) # In[15]: print(reduced_labels[1000]) print(themes_only[1000]) print(disciplines_only[1000]) #", "\".join(texts) train_al.writerow([ident, labellist, textlist]) with open(datasets+'/all_labels_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_al_csv: test_al =", "text = str(textliste).replace(\"'\", \"\") # Create the wordcloud object wordcloud = WordCloud(width=680, height=680,", "test_do = csv.writer(test_do_csv, delimiter = \";\") test_do.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts", "for label in y: for l in label: if l in labelcount_dic: labelcount_dic[l]", "= [] texts = [] for ident, label, text in zip(identset, labelset, textset):", "train_al.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_train_al, y_train_al, X_train_al): labellist =", "\"paysage et environnement_T\", \"Systèmes_T\", \"modélisation_T\", \"géostatistiques_T\"], \"Histoire_T\" : [\"Histoire des femmes_T\", \"Histoire du", "vocab.items(): if key in vocabulary: vocabulary[key] += value else: vocabulary[key] = value return", "Klassen insgesammt (reduziert):\", len(disciplines_only_reduced_dic)) # schreibt die Anzahl der Blogbeiträge pro Label in", 
"\".join(texts) alw.writerow([ident, labellist, textlist]) # In[10]: # splittet all_labels in train- und testset", "schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei # (all_labels nicht reduziert)", "encoding=\"utf8\") as rlr: for key, value in sorted(reduced_labels_reduced_dic.items()): rlr.write(\"%s : %s\\n\" % (key,", "de l'information et de la communication_D\" : [\"Communication_D\", \"Sciences de l'information et bibliothéconomie_D\"],", "= filnames X_train_al, X_test_al, y_train_al, y_test_al, z_train_al, z_test_al = train_test_split(textset, all_labels_reduced, identset, test_size", "in enumerate(y): tmp_all_labels = [] tmp_themes = [] tmp_disciplines = [] #print(\"\\nlabels in", "gefunden:\", element) #print(\"\\ntmp_list:\", tmp_all_labels) labels.append(list(set(tmp_all_labels))) themes.append(list(set(tmp_themes))) disciplines.append(list(set(tmp_disciplines))) #print(\"\\nnew labelset:\", labels) return labels, themes,", "in zip(identset, disciplines_only, textset): dow.writerow([ident, labels, texts]) # In[29]: # Für den Blog", "labellist, textlist]) # In[19]: # splittet all_labels in train- und testset # x", "= train_test_split(textset, all_labels_reduced, identset, test_size = 0.20, stratify=all_labels_reduced, random_state=42) # In[11]: # speichert", "\"\"\" Preprocessing des Korpus \"\"\" # file: input data # def __init__(self, file,", "key in vocabulary: vocabulary[key] += value else: vocabulary[key] = value return ident, label,", "csv.writer(test_al_csv, delimiter = \";\") test_al.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_test_al,", "train_do = csv.writer(train_do_csv, delimiter = \";\") train_do.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts", "key, value in sorted(themes_only_reduced_dic.items()): tor.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" %", "newline='', encoding=\"utf-8\") as test_do_csv: test_do = csv.writer(test_do_csv, delimiter = \";\") 
test_do.writerow([\"filename\", \"classes\", \"text\"])", "[] stopwords = open(folder+'/german_stopwords_plain.txt', 'r').read() for token in tokens: #if token in stopWords:", "# reduce labels to highest level def reduce_labels(y): \"\"\"Reduziert die Themen und Disziplinen", "(key, value)) # In[9]: # schreibt filename, classes, text von all_labels in csv-Datei", "alw.writerow([ident, labellist, textlist]) # In[10]: # splittet all_labels in train- und testset #", "die Anzahl der Blogbeiträge pro Label in txt-Datei # (reduced_labels reduziert auf Labels", "% (key, value)) print(\"%s: %s\" % (key, value)) # In[18]: # schreibt filename,", "politique_T\" : [\"Guerres_T\", \"conflits_T\", \"violence_T\", \"Génocides et massacres_T\", \"Histoire politique_T\", \"Institutions politiques_T\", \"Mouvements", "Klassen insgesammt:\", len(reduced_labels_dic)) # schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei", "plt.savefig(pictures+'/Blogs_all_labels_histogram.png') # In[33]: # Visualisierung des all_label_dics in einer Wortwolke # Create a", "txt-Datei # (reduced_labels reduziert auf Labels mit mehr als 100 Blogbeiträgen) with open(folder+'/blogposts_per_reduced_labels_reduced.txt',\"w\",", "X_train_al): labellist = \", \".join(labels) textlist = \" \".join(texts) train_al.writerow([ident, labellist, textlist]) with", "et sciences de l'éducation_D\", \"Éducation : disciplines scientifiques_D\", \"Éducation spécialisée_D\"], \"Études environnementales_D\" :", "speichert train-, validation- und testset von disciplines_only in csv-Dateien with open(datasets+'/disciplines_only_trainset.csv', 'w', newline='',", "l'Est_T\", \"Afrique de l'Ouest_T\"], \"Amériques_T\" : [\"Amérique latine_T\", \"Brésil_T\", \"Cône sud_T\", \"Mexique et", "y_test_to, z_train_to, z_test_to = train_test_split(textset, themes_only_reduced, identset, test_size = 0.20, stratify=themes_only_reduced, random_state=42) #", "+= 1 else: labelcount_dic[l] = 1 return labelcount_dic # In[6]: # löscht 
kleine", "as do: for key, value in sorted(disciplines_only_dic.items()): do.write(\"%s : %s\\n\" % (key, value))", "= document.split(\";\", 3)[2].strip() #print(\"tmp_label2:\", tmp_label2) tmp_text, vocab = self.normalize_text(document.split(\";\", 4)[3]) #tmp_text = document.split(\";\",", "corpus_T\", \"enquêtes_T\", \"archives_T\", \"Archéologie_T\", \"Cartographie_T\", \"imagerie_T\", \"SIG_T\", \"Digital humanities_T\", \"Épistémologie_T\", \"Historiographie_T\", \"Méthodes de", "\"France_T\", \"Îles britanniques_T\", \"Italie_T\", \"Méditerranée_T\", \"Monde germanique_T\", \"Pays baltes et scandinaves_T\", \"Péninsule ibérique_T\",", "len(small_classes_disciplines_only)) print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt (reduziert):\", len(disciplines_only_reduced_dic)) # schreibt die Anzahl", "\"Littérature_D\"], \"Management et administration_D\" : [\"Ergonomie_D\", \"Travail et relations professionnelles_D\", \"Planification et développement_D\",", "csv output from sklearn.model_selection import train_test_split # module to split data into train", "Anzahl der Blogbeiträge pro Label in txt-Datei # (reduced_labels nicht reduziert) with open(folder+'/blogposts_per_reduced_labels.txt',\"w\",", "% (key, value)) print(\"%s: %s\" % (key, value)) # In[27]: # disciplines_only dic", "= [] labels = [] texts = [] for ident, label, text in", "# Create the wordcloud object wordcloud = WordCloud(width=680, height=680, margin=0, background_color=\"white\").generate(text) # Display", "txt-Datei # (reduced_labels nicht reduziert) with open(folder+'/blogposts_per_reduced_labels.txt',\"w\", encoding=\"utf8\") as rl: for key, value", "0.20, stratify=reduced_labels_reduced, random_state=42) # In[20]: # speichert train- und testset von all_labels in", "Autorin: <NAME> # In[1]: # Imports import os import numpy as np import", "femmes_T\", \"Histoire du travail_T\", \"Histoire économique_T\", \"Histoire industrielle_T\", \"Histoire rurale_T\", \"Histoire 
sociale_T\", \"Histoire", "industrielle_T\", \"Histoire rurale_T\", \"Histoire sociale_T\", \"Histoire urbaine_T\"], \"Information_T\" : [\"Édition électronique_T\", \"Histoire et", "100 Blogbeiträgen) with open(folder+'/blogposts_per_disciplines_only_reduced.txt',\"w\", encoding=\"utf8\") as dor: for key, value in sorted(disciplines_only_reduced_dic.items()): dor.write(\"%s", "'du', 'études', 'de', 'des', 'la', 'dict_keys'] for element in textliste: if element in", "reduziert auf Labels mit mehr als 100 Blogbeiträgen) with open(folder+'/blogposts_per_reduced_labels_reduced.txt',\"w\", encoding=\"utf8\") as rlr:", "nicht reduziert) with open(folder+'/blogposts_per_reduced_labels.txt',\"w\", encoding=\"utf8\") as rl: for key, value in sorted(reduced_labels_dic.items()): rl.write(\"%s", "religions_T\", \"Sociologie des religions_T\"], \"Représentations_T\" : [\"Architecture_T\", \"Études visuelles_T\", \"Histoire culturelle_T\", \"Histoire de", "from wordcloud import WordCloud # module for wordclouds # In[2]: # Class for", "len(small_classes_all_labels)) print(\"Klassen insgesammt (reduziert):\", len(all_labels_reduced_dic)) # schreibt die Anzahl der Blogbeiträge pro Label", "splittet all_labels in train- und testset # x = text, y = labels,", "first row is headline for i, document in enumerate(documents[1:]): tmp_ident = document.split(\";\", 1)[0]", "text, y = labels, z = filnames X_train_do, X_test_do, y_train_do, y_test_do, z_train_do, z_test_do", "labellist = \", \".join(labels) textlist = \" \".join(texts) train_al.writerow([ident, labellist, textlist]) with open(datasets+'/all_labels_testset.csv',", "\"enquêtes_T\", \"archives_T\", \"Archéologie_T\", \"Cartographie_T\", \"imagerie_T\", \"SIG_T\", \"Digital humanities_T\", \"Épistémologie_T\", \"Historiographie_T\", \"Méthodes de traitement", ": [\"Études littéraires_D\", \"Théorie et critique littéraires_D\", \"Littérature britannique_D\", \"Littérature romane_D\", \"Littérature_D\"], \"Management", "for x in 
tmp_label3] tmp_label.extend(tmp_label3) #print(\"Sonderfall:\", tmp_ident, tmp_label) tmp_text, vocab = self.normalize_text(document.split(\";\", 3)[2])", "# all_labels dic all_labels_dic = get_label_dic(labelset) print(\"Klassen insgesammt:\", len(all_labels_dic)) #print(all_labels_dic) count = 0", "labellist = \", \".join(labels) textlist = \" \".join(texts) alw.writerow([ident, labellist, textlist]) # In[10]:", "# x = text, y = labels, z = filnames X_train_to, X_test_to, y_train_to,", "for x in tmp_label] ident.append(tmp_ident) label.append(tmp_label) text.append(tmp_text) for key, value in vocab.items(): if", "\"Psychologie_T\"], \"Religions_T\" : [\"Histoire des religions_T\", \"Sociologie des religions_T\"], \"Représentations_T\" : [\"Architecture_T\", \"Études", "l'information_T\"], \"Langage_T\" : [\"Linguistique_T\", \"Littératures_T\"], \"Moyen Âge_T\" : [\"Bas Moyen Âge_T\", \"Haut Moyen", "et linguistique_D\"], \"Littérature_D\" : [\"Études littéraires_D\", \"Théorie et critique littéraires_D\", \"Littérature britannique_D\", \"Littérature", "for key, value in sorted(reduced_labels_reduced_dic.items()): rlr.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\"", "sciences_T\", \"Philosophie des sciences_T\", \"Sociologie des sciences_T\"], \"Études du politique_T\" : [\"Guerres_T\", \"conflits_T\",", "np.arange(len(bars)) # Create horizontal bars plt.barh(y_pos, height) # Create names on the y-axis", "politique_T\", \"Anthropologie religieuse_T\", \"Anthropologie sociale_T\"], \"Asie_T\" : [\"Asie centrale_T\", \"Asie du Sud-Est_T\", \"Extrême", "et aménagement_T\", \"Géographie rurale_T\", \"Géographie urbaine_T\", \"Migrations_T\", \"immigrations_T\", \"minorités_T\", \"Nature_T\", \"paysage et environnement_T\",", "as bpl: for key, value in sorted(all_labels_dic.items()): bpl.write(\"%s : %s\\n\" % (key, value))", "encoding=\"utf8\") as bplr: for key, value in sorted(all_labels_reduced_dic.items()): bplr.write(\"%s : %s\\n\" % (key,", "test_to = 
csv.writer(test_to_csv, delimiter = \";\") test_to.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts", "in zip(z_test_do, y_test_do, X_test_do): labellist = \", \".join(labels) textlist = \" \".join(texts) test_do.writerow([ident,", "print(\"%s: %s\" % (key, value)) # In[27]: # disciplines_only dic reduced (<100) disciplines_only_reduced,", "os.path.exists(pictures): os.makedirs(pictures) # In[32]: # Histogramm: Blogs pro all_labels (besser in excel visualisieren)", "= {} for word in tokens: if word in vocab: vocab[word] += 1", "Texten:\", len(small_classes_all_labels)) print(\"Klassen insgesammt (reduziert):\", len(all_labels_reduced_dic)) # schreibt die Anzahl der Blogbeiträge pro", "\"géographie et développement_D\" : [\"Études environnementales_D\", \"Géographie_D\", \"Études urbaines_D\"], \"Histoire et archéologie_D\" :", "textlist]) with open(datasets+'/themes_only_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_to_csv: test_to = csv.writer(test_to_csv, delimiter =", "load the corpus into memory! 
def __iter__(self): openfile = open(self.file, 'r', encoding='utf-8') #", "\";\") train_to.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_train_to, y_train_to, X_train_to): labellist", "disk identset, labelset, textset, vocab = MyCorpus(file) # save vocabulary to file with", "csv-Dateien with open(datasets+'/themes_only_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_to_csv: train_to = csv.writer(train_to_csv, delimiter =", "# # Erstellung des Korpus all_labels # In[7]: # all_labels dic all_labels_dic =", "wurden keine Disziplinen gewählt, # dementsprechend wird dieser Blog aus disciplines_only entfernt def", "l'information et bibliothéconomie_D\"], \"Sciences politiques_D\" : [\"Relations internationales_D\", \"Sciences politiques_D\", \"Administration publique_D\"], \"Sociologie", "publique_D\"], \"Arts et humanités_D\" : [\"Architecture_D\", \"Arts_D\", \"Études asiatiques_D\", \"Études anciennes_D\", \"Études culturelles_D\",", "disciplines_only entfernt def delete_blog(identset, labelset, textset): idents = [] labels = [] texts", "du travail_T\", \"Sociologie économique_T\", \"Sociologie urbaine_T\", \"Sport et loisirs_T\"]} themes_dic = {k.lower(): [i.lower()", "\"XXe siècle_T\", \"1914-1918_T\", \"1918-1939_T\", \"1939-1945_T\", \"1945-1989_T\", \"1989 à de nos jours_T\", \"XXIe siècle_T\"],", "\" \".join(texts) train_al.writerow([ident, labellist, textlist]) with open(datasets+'/all_labels_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_al_csv: test_al", "in csv-Dateien with open(datasets+'/all_labels_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_al_csv: train_al = csv.writer(train_al_csv, delimiter", "schreibt filename, classes, text von disciplines_only in csv-Datei with open(datasets+'/de_labeled_corpus_disciplines_only.csv', 'w', newline='', encoding=\"utf-8\")", "labels, texts in zip(identset, all_labels_reduced, textset): labellist = \", \".join(labels) textlist = \"", "each 
document as one item of a list (one document = identnumber, nace-code-list", "der Stelle %s: %s\" % (i, elements)) for element in elements: #print(\"\\nLabel:\", element)", "= 0.20, stratify=themes_only_reduced, random_state=42) # In[25]: # speichert train-, validation- und testset von", "tmp_label.extend(tmp_label3) #print(\"Sonderfall:\", tmp_ident, tmp_label) tmp_text, vocab = self.normalize_text(document.split(\";\", 3)[2]) #tmp_text = document.split(\";\", 3)[2]", "print(\"%s: %s\" % (key, value)) # In[17]: # reduced_labels reduced dic (<100) reduced_labels_reduced,", "value in sorted(themes_only_reduced_dic.items()): tor.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key,", "in txt-Datei # (disciplines_only nicht reduziert) with open(folder+'/blogposts_per_discipline.txt',\"w\", encoding=\"utf8\") as do: for key,", "textlist = \" \".join(texts) train_al.writerow([ident, labellist, textlist]) with open(datasets+'/all_labels_testset.csv', 'w', newline='', encoding=\"utf-8\") as", "la vie_T\", \"Criminologie_T\", \"Démographie_T\", \"Étude des genres_T\", \"Sociologie de la consommation_T\", \"Sociologie de", "(reduziert):\", len(disciplines_only_reduced_dic)) # schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei #", "plt.savefig(pictures+'/Blogs_all_labels_histogram.svg', format='svg') plt.savefig(pictures+'/Blogs_all_labels_histogram.png') # In[33]: # Visualisierung des all_label_dics in einer Wortwolke #", "schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei # (reduced_labels nicht reduziert)", "encoding=\"utf-8\") as al_csv: alw = csv.writer(al_csv, delimiter = \";\") alw.writerow([\"filename\", \"classes\", \"text\"]) for", "X_test_al): labellist = \", \".join(labels) textlist = \" \".join(texts) test_al.writerow([ident, labellist, textlist]) #", "element in textliste: if element in blacklist: textliste.remove(element) text = str(textliste).replace(\"'\", \"\") #", "%s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) count += 
value print(\"Anzahl", "urbaine_T\", \"Sport et loisirs_T\"]} themes_dic = {k.lower(): [i.lower() for i in v] for", "'la', 'dict_keys'] for element in textliste: if element in blacklist: textliste.remove(element) text =", "textlist]) # # Erstellung des Korpus disciplines_only # In[26]: # disciplines_only dic disciplines_only_dic", "reduziert auf Labels mit mehr als 100 Blogbeiträgen) with open(folder+'/blogposts_per_disciplines_only_reduced.txt',\"w\", encoding=\"utf8\") as dor:", "sorted(reduced_labels_reduced_dic.items()): rlr.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) #", "texts = [] for ident, label, text in zip(identset, labelset, textset): if ident.startswith('aes_'):", "delimiter = \";\") tow.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(identset, themes_only,", "labels, texts in zip(identset, themes_only, textset): tow.writerow([ident, labels, texts]) # In[24]: # splittet", "continue else: tmp_labels.append(element) reduced_labelset.append(tmp_labels) return reduced_labelset, small_classes # # Erstellung des Korpus all_labels", "text, y = labels, z = filnames X_train_al, X_test_al, y_train_al, y_test_al, z_train_al, z_test_al", "csv-Dateien with open(datasets+'/all_labels_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_al_csv: train_al = csv.writer(train_al_csv, delimiter =", "input data # def __init__(self, file, x, y): def __init__(self, file): self.file =", "'?', '…','·', '·', '\"', '„', '“', '”', \"´\", \"`\", \"’\", \"‘\", \"‚\",\"'\", '(',", "Labels mit mehr als 100 Blogbeiträgen) with open(folder+'/blogposts_per_disciplines_only_reduced.txt',\"w\", encoding=\"utf8\") as dor: for key,", "for accessing and preprocessing the data folder = '../Preprocessing' datasets = '../Datasets' file", "in csv-Dateien with open(datasets+'/disciplines_only_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_do_csv: train_do = csv.writer(train_do_csv, delimiter", "'•', '§', '$', '@', 
'€', '&', '%', '&', '+', '*', '=', '#', '«',", "insgesammt:\", len(reduced_labels_dic)) # schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei #", "filnames X_train_do, X_test_do, y_train_do, y_test_do, z_train_do, z_test_do = train_test_split(textset, disciplines_only_reduced, identset, test_size =", "# discipilnes for key, value in disciplines_dic.items(): if element == key: tmp_all_labels.append(element) tmp_disciplines.append(element)", "als 100 Texten:\", len(small_classes_reduced_labels)) print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt (reduziert):\", len(reduced_labels_reduced_dic)) #", "reduced_labels # In[12]: # themes themes_dic = {\"Afrique_T\" : [\"Afrique du nord_T\", \"Algérie_T\",", "höchste Hierarchieebene reduzierte Themen (reduziert):\", len(themes_only_reduced_dic)) # schreibt die Anzahl der Blogbeiträge pro", "\"États-Unis_T\"], \"anthropologie_T\" : [\"Anthropologie culturelle_T\", \"Anthropologie politique_T\", \"Anthropologie religieuse_T\", \"Anthropologie sociale_T\"], \"Asie_T\" :", "la culture_T\", \"Sociologie de la santé_T\", \"Sociologie du travail_T\", \"Sociologie économique_T\", \"Sociologie urbaine_T\",", "\"Océanie_T\" : [\"Océanie_T\"], \"Pensée_T\" : [\"Histoire intellectuelle_T\", \"Philosophie_T\", \"Sciences cognitives_T\"], \"Préhistoire et antiquité_T\"", "value in sorted(reduced_labels_dic.items()): rl.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key,", "urbaine_T\"], \"Information_T\" : [\"Édition électronique_T\", \"Histoire et sociologie de la presse_T\", \"Histoire et", "#return norm_text, vocab return tokens, vocab # split identnumber, nace-code-list and corporate purpose", "os.path.exists(datasets): os.makedirs(datasets) class MyCorpus(object): \"\"\" Preprocessing des Korpus \"\"\" # file: input data", "zip(identset, all_labels_reduced, textset): labellist = \", \".join(labels) textlist = \" \".join(texts) alw.writerow([ident, labellist,", "l'Ouest_T\"], \"Amériques_T\" : 
[\"Amérique latine_T\", \"Brésil_T\", \"Cône sud_T\", \"Mexique et Amérique centrale_T\", \"Pays", "textlist = \" \".join(texts) test_do.writerow([ident, labellist, textlist]) # # Visualisierungen # In[ ]:", "print(len(list(themes_dic))) # In[13]: disciplines_dic = {\"administration publique et développement_D\" : [\"Relations internationales_D\", \"Sciences", ": [\"Asie centrale_T\", \"Asie du Sud-Est_T\", \"Extrême Orient_T\", \"Chine_T\", \"Japon_T\", \"Monde indien_T\", \"Monde", "von disciplines_only in csv-Dateien with open(datasets+'/disciplines_only_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_do_csv: train_do =", "do: for key, value in sorted(disciplines_only_dic.items()): do.write(\"%s : %s\\n\" % (key, value)) print(\"%s:", "tmp_themes = [] tmp_disciplines = [] #print(\"\\nlabels in y an der Stelle %s:", "'w', newline='', encoding=\"utf-8\") as to_csv: tow = csv.writer(to_csv, delimiter = \";\") tow.writerow([\"filename\", \"classes\",", "%s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) # In[9]: # schreibt", "Preprocessing der Texte # # Autorin: <NAME> # In[1]: # Imports import os", "à de nos jours_T\", \"XXIe siècle_T\"], \"Époque moderne_T\" : [\"Révolution française_T\", \"XVIe siècle_T\",", "\"Systèmes_T\", \"modélisation_T\", \"géostatistiques_T\"], \"Histoire_T\" : [\"Histoire des femmes_T\", \"Histoire du travail_T\", \"Histoire économique_T\",", "value else: vocabulary[key] = value return ident, label, text, vocabulary # In[3]: #", "Moyen Âge_T\", \"Haut Moyen Âge_T\"], \"Océanie_T\" : [\"Océanie_T\"], \"Pensée_T\" : [\"Histoire intellectuelle_T\", \"Philosophie_T\",", "# In[14]: # reduce labels to highest level def reduce_labels(y): \"\"\"Reduziert die Themen", "éducative_D\", \"Psychologie expérimentale_D\", \"Psychologie pluridisciplinaire_D\", \"Psychanalyse_D\", \"Psychologie sociale_D\"], \"Sciences de la santé et", "print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt:\", len(disciplines_only_dic)) # 
schreibt die Anzahl der Blogbeiträge", "an der Stelle %s: %s\" % (i, elements)) for element in elements: #print(\"\\nLabel:\",", "y_test_al, X_test_al): labellist = \", \".join(labels) textlist = \" \".join(texts) test_al.writerow([ident, labellist, textlist])", "%s\" % (key, value)) print(len(list(disciplines_dic))) # In[14]: # reduce labels to highest level", "vocab = self.normalize_text(document.split(\";\", 4)[3]) #tmp_text = document.split(\";\", 4)[3].strip() #print(\"tmp_text:\", tmp_text) tmp_label1 = tmp_label1.lower().replace('\"',", "in die drei Listen 1) filnames 2) labels 3) text \"\"\" ident =", "britanniques_T\", \"Italie_T\", \"Méditerranée_T\", \"Monde germanique_T\", \"Pays baltes et scandinaves_T\", \"Péninsule ibérique_T\", \"Suisse_T\"], \"Géographie_T\"", "\"1914-1918_T\", \"1918-1939_T\", \"1939-1945_T\", \"1945-1989_T\", \"1989 à de nos jours_T\", \"XXIe siècle_T\"], \"Époque moderne_T\"", "\"classes\", \"text\"]) for ident, labels, texts in zip(identset, reduced_labels_reduced, textset): labellist = \",", "\"Sciences de l'information et de la communication_D\" : [\"Communication_D\", \"Sciences de l'information et", "z = filnames X_train_do, X_test_do, y_train_do, y_test_do, z_train_do, z_test_do = train_test_split(textset, disciplines_only_reduced, identset,", "Wortwolke # Create a list of word textliste=str(all_labels_dic.keys()) textliste=textliste.replace(',', '').replace(\"'\", \"\").replace('\"', '').replace(\"l'\", '').split('", "sorted(disciplines_only_dic.items()): do.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) #", "Âge_T\" : [\"Bas Moyen Âge_T\", \"Haut Moyen Âge_T\"], \"Océanie_T\" : [\"Océanie_T\"], \"Pensée_T\" :", "# In[28]: # schreibt filename, classes, text von disciplines_only in csv-Datei with open(datasets+'/de_labeled_corpus_disciplines_only.csv',", "= \" \".join(texts) test_do.writerow([ident, labellist, textlist]) # # Visualisierungen # In[ ]: pictures", "als 100 Blogbeiträgen) with 
open(folder+'/blogposts_per_reduced_labels_reduced.txt',\"w\", encoding=\"utf8\") as rlr: for key, value in sorted(reduced_labels_reduced_dic.items()):", ": [\"Histoire de l'éducation_T\", \"Sciences de l'éducation_T\"], \"Épistémologie et méthodes_T\": [\"Approches biographiques_T\", \"Approches", "themes_dic.items()} print(\"THEMES:\") for key, value in themes_dic.items(): print(\"%s: %s\" % (key, value)) print(len(list(themes_dic)))", "value in vocab.items(): if key in vocabulary: vocabulary[key] += value else: vocabulary[key] =", "ancienne_T\", \"Histoire grecque_T\", \"Histoire romaine_T\", \"Monde oriental_T\", \"Préhistoire_T\"], \"Psychisme_T\" : [\"Psychanalyse_T\", \"Psychologie_T\"], \"Religions_T\"", "with open(datasets+'/disciplines_only_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_do_csv: train_do = csv.writer(train_do_csv, delimiter = \";\")", "delimiter = \";\") test_rl.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_test_rl, y_test_rl,", "nicht reduziert) with open(folder+'/blogposts_per_discipline.txt',\"w\", encoding=\"utf8\") as do: for key, value in sorted(disciplines_only_dic.items()): do.write(\"%s", "(\"Element nicht gefunden:\", element) #print(\"\\ntmp_list:\", tmp_all_labels) labels.append(list(set(tmp_all_labels))) themes.append(list(set(tmp_themes))) disciplines.append(list(set(tmp_disciplines))) #print(\"\\nnew labelset:\", labels) return", "die höchste Hierarchiestufe\"\"\" labels = [] # new y themes = [] disciplines", "key) else: (\"Element nicht gefunden:\", element) #print(\"\\ntmp_list:\", tmp_all_labels) labels.append(list(set(tmp_all_labels))) themes.append(list(set(tmp_themes))) disciplines.append(list(set(tmp_disciplines))) #print(\"\\nnew labelset:\",", "image: plt.imshow(wordcloud, interpolation='bilinear') plt.axis(\"off\") plt.margins(x=0, y=0) plt.show() # Save as SVG: plt.savefig(pictures+'/Blogs_per_all_labels_wordcloud.svg', format='svg')", "train-, validation- und 
testset von themes_only in csv-Dateien with open(datasets+'/themes_only_trainset.csv', 'w', newline='', encoding=\"utf-8\")", "santé_D\", \"Sciences et pratiques des soins_D\", \"Biomédecine_D\", \"Toxicomanie_D\"], \"Sciences de l'information et de", "droit_T\", \"Sociologie du droit_T\"], \"Économie_T\" : [\"Développement économique_T\", \"Économie politique_T\", \"Gestion_T\", \"Travail_T\", \"emploi_T\"],", "else: (\"Element nicht gefunden:\", element) #print(\"\\ntmp_list:\", tmp_all_labels) labels.append(list(set(tmp_all_labels))) themes.append(list(set(tmp_themes))) disciplines.append(list(set(tmp_disciplines))) #print(\"\\nnew labelset:\", labels)", "clinique_D\", \"Psychologie du développement_D\", \"Psychologie éducative_D\", \"Psychologie expérimentale_D\", \"Psychologie pluridisciplinaire_D\", \"Psychanalyse_D\", \"Psychologie sociale_D\"],", "quantitatives_T\", \"Sciences auxiliaires de l'Histoire_T\", \"Vie de la recherche_T\"], \"Époque contemporaine_T\" : [\"Prospectives_T\",", "\"\"\"Reduziert die Themen und Disziplinen auf die höchste Hierarchiestufe\"\"\" labels = [] #", "= document.split(\";\", 2)[1].strip() tmp_label3 = tmp_label3.lower().replace('\"', '').strip().split(\",\") tmp_label3 = [x.strip()+'_t' for x in", "k, v in disciplines_dic.items()} print(\"DISCIPLINES:\") for key, value in disciplines_dic.items(): print(\"%s: %s\" %", "text to lower-case, remove punctuation and stopwords def normalize_text(self, text): \"\"\"Bereinigt den Text:", "all_labels_reduced, small_classes_all_labels = remove_small_classes(labelset, all_labels_dic) all_labels_reduced_dic = get_label_dic(all_labels_reduced) print(\"Klassen mit weniger als 100", "\"Sociologie des religions_T\"], \"Représentations_T\" : [\"Architecture_T\", \"Études visuelles_T\", \"Histoire culturelle_T\", \"Histoire de l'Art_T\",", ": %s\\\" % (key, value)) small_classes.append(key) for elements in labelset: tmp_labels = []", "# In[22]: # themes_only dic reduced (<100) 
themes_only_reduced, small_classes_themes_only = remove_small_classes(themes_only, themes_only_dic) themes_only_reduced_dic", "in y an der Stelle %s: %s\" % (i, elements)) for element in", "\"Religions_T\" : [\"Histoire des religions_T\", \"Sociologie des religions_T\"], \"Représentations_T\" : [\"Architecture_T\", \"Études visuelles_T\",", "get_label_dic(disciplines_only_reduced) print(\"Klassen mit weniger als 100 Texten:\", len(small_classes_disciplines_only)) print(\"Auf höchste Hierarchieebene reduzierte Klassen", "l'Histoire_T\", \"Vie de la recherche_T\"], \"Époque contemporaine_T\" : [\"Prospectives_T\", \"XIXe siècle_T\", \"XXe siècle_T\",", "folder = '../Preprocessing' datasets = '../Datasets' file = folder+'/de_labeled_corpus.csv' # Hypotheses-Blogposts: filename;classes;text if", "encoding=\"utf8\") as bpl: for key, value in sorted(all_labels_dic.items()): bpl.write(\"%s : %s\\n\" % (key,", "höchste Hierarchieebene reduzierte Klassen insgesammt:\", len(disciplines_only_dic)) # schreibt die Anzahl der Blogbeiträge pro", "= csv.writer(test_do_csv, delimiter = \";\") test_do.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in", "tmp_themes.append(element) #print(\"\\nTheme key:\", element) elif element in value: tmp_all_labels.append(key) tmp_themes.append(key) #print(\"\\nTheme:\", key) else:", "social et politique sociale_D\" : [\"Études des relations interethniques_D\", \"Études sur la famille_D\",", "object wordcloud = WordCloud(width=680, height=680, margin=0, background_color=\"white\").generate(text) # Display the generated image: plt.imshow(wordcloud,", "y_pos = np.arange(len(bars)) # Create horizontal bars plt.barh(y_pos, height) # Create names on", "= [] disciplines = [] for i, elements in enumerate(y): tmp_all_labels = []", "';', ':', '!', '?', '…','·', '·', '\"', '„', '“', '”', \"´\", \"`\", \"’\",", "# schreibt filename, classes, text von reduced_labels in csv-Datei with 
open(datasets+'/de_labeled_corpus_reduced_labels.csv', 'w', newline='',", "représentation_T\", \"Géographie appliquée et aménagement_T\", \"Géographie rurale_T\", \"Géographie urbaine_T\", \"Migrations_T\", \"immigrations_T\", \"minorités_T\", \"Nature_T\",", "culturelle_T\", \"Anthropologie politique_T\", \"Anthropologie religieuse_T\", \"Anthropologie sociale_T\"], \"Études des sciences_T\" : [\"Histoire des", "print(reduced_labels[1000]) print(themes_only[1000]) print(disciplines_only[1000]) # In[16]: # reduced_labels dic reduced_labels_dic = get_label_dic(reduced_labels) print(\"Auf höchste", "des Korpus reduced_labels # In[12]: # themes themes_dic = {\"Afrique_T\" : [\"Afrique du", "l'éducation_D\", \"Éducation : disciplines scientifiques_D\", \"Éducation spécialisée_D\"], \"Études environnementales_D\" : [\"Études environnementales_D\", \"Géographie_D\",", "= \", \".join(labels) textlist = \" \".join(texts) test_rl.writerow([ident, labellist, textlist]) # # Erstellung", "for ident, labels, texts in zip(identset, all_labels_reduced, textset): labellist = \", \".join(labels) textlist", "# schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei # (disciplines_only nicht", "# In[30]: # splittet all_labels in train- und testset # x = text,", "die Anzahl der Blogbeiträge pro Label in txt-Datei # (all_labels nicht reduziert) with", "value in label_dic.items(): if value < 100: #print(\\\"%s : %s\\\" % (key, value))", "% (key, value)) # In[4]: print(identset[1000]) print(labelset[1000]) print(textset[1000]) print(len(identset)) print(len(labelset)) print(len(textset)) # In[5]:", "et relations professionnelles_D\", \"Planification et développement_D\", \"Transports_D\", \"Management et administration_D\"], \"Pluridisciplinarité_D\" : [\"Sciences", "speichert train-, validation- und testset von themes_only in csv-Dateien with open(datasets+'/themes_only_trainset.csv', 'w', newline='',", "\"Travail et relations professionnelles_D\", \"Planification et 
développement_D\", \"Transports_D\", \"Management et administration_D\"], \"Pluridisciplinarité_D\" :", "\"Géographie_D\", \"Études urbaines_D\"], \"Histoire et archéologie_D\" : [\"Archéologie_D\", \"Histoire_D\", \"Histoire et philosophie des", "'„', '“', '”', \"´\", \"`\", \"’\", \"‘\", \"‚\",\"'\", '(', ')', '[', ']', '{',", "file # memory friendlys because doesn't load the corpus into memory! def __iter__(self):", "\"Géographie_D\", \"Études urbaines_D\"], \"géographie et développement_D\" : [\"Études environnementales_D\", \"Géographie_D\", \"Études urbaines_D\"], \"Histoire", "zip(identset, reduced_labels_reduced, textset): labellist = \", \".join(labels) textlist = \" \".join(texts) rlw.writerow([ident, labellist,", "in sorted(vocab.items()): v.write(\"%s : %s\\n\" % (key, value)) # In[4]: print(identset[1000]) print(labelset[1000]) print(textset[1000])", "{\"administration publique et développement_D\" : [\"Relations internationales_D\", \"Sciences politiques_D\", \"Administration publique_D\"], \"Arts et", "mit mehr als 100 Blogbeiträgen) with open(folder+'/blogposts_per_reduced_labels_reduced.txt',\"w\", encoding=\"utf8\") as rlr: for key, value", "Blogbeiträge pro Label in txt-Datei # (themes_only reduziert auf Labels mit mehr als", "\"Études urbaines_D\"], \"géographie et développement_D\" : [\"Études environnementales_D\", \"Géographie_D\", \"Études urbaines_D\"], \"Histoire et", "française_T\", \"XVIe siècle_T\", \"XVIIe siècle_T\", \"XVIIIe siècle_T\"], \"Ethnologie_T\" : [\"Anthropologie culturelle_T\", \"Anthropologie politique_T\",", "'w', newline='', encoding=\"utf-8\") as test_rl_csv: test_rl = csv.writer(test_rl_csv, delimiter = \";\") test_rl.writerow([\"filename\", \"classes\",", "0.20, stratify=all_labels_reduced, random_state=42) # In[11]: # speichert train- und testset von all_labels in", "\"Sociologie de la santé_T\", \"Sociologie du travail_T\", \"Sociologie économique_T\", \"Sociologie urbaine_T\", \"Sport et", "In[23]: # schreibt 
filename, classes, text von themes_only in csv-Datei with open(datasets+'/de_labeled_corpus_themes_only.csv', 'w',", "test_size = 0.20, stratify=themes_only_reduced, random_state=42) # In[25]: # speichert train-, validation- und testset", "\"Sociologie et anthropologie_D\" : [\"Anthropologie_D\", \"Études régionales_D\", \"Sociologie_D\", \"Études féministes_D\"], \"Travail social et", "\"Folklore_D\", \"Humanités pluridisciplinaires_D\", \"Musique_D\", \"Philosophie_D\", \"Religions_D\"], \"bibliothéconomie_D\" : [\"Communication_D\", \"Sciences de l'information et", "all_labels (besser in excel visualisieren) height = list(all_labels_dic.values()) bars = list(all_labels_dic.keys()) y_pos =", "Blogbeiträge pro Label (Label : Anzahl der zugehörigen Blogbeiträge)\"\"\" labelcount_dic = {} #tmp_label", "visualization from wordcloud import WordCloud # module for wordclouds # In[2]: # Class", "des Korpus disciplines_only # In[26]: # disciplines_only dic disciplines_only_dic = get_label_dic(disciplines_only) print(\"Auf höchste", "[\"Relations internationales_D\", \"Sciences politiques_D\", \"Administration publique_D\"], \"Sociologie et anthropologie_D\" : [\"Anthropologie_D\", \"Études régionales_D\",", "= (tmp_label1 + tmp_label2) #print(tmp_label) tmp_label = [x.strip() for x in tmp_label] ident.append(tmp_ident)", "\"Histoire rurale_T\", \"Histoire sociale_T\", \"Histoire urbaine_T\"], \"Information_T\" : [\"Édition électronique_T\", \"Histoire et sociologie", "# splittet all_labels in train- und testset # x = text, y =", "[\"Égypte ancienne_T\", \"Histoire grecque_T\", \"Histoire romaine_T\", \"Monde oriental_T\", \"Préhistoire_T\"], \"Psychisme_T\" : [\"Psychanalyse_T\", \"Psychologie_T\"],", "\"Éducation_D\" : [\"Éducation et sciences de l'éducation_D\", \"Éducation : disciplines scientifiques_D\", \"Éducation spécialisée_D\"],", "as train_to_csv: train_to = csv.writer(train_to_csv, delimiter = \";\") train_to.writerow([\"filename\", \"classes\", \"text\"]) for 
ident,", "textliste: if element in blacklist: textliste.remove(element) text = str(textliste).replace(\"'\", \"\") # Create the", "littéraires_D\", \"Théorie et critique littéraires_D\", \"Littérature britannique_D\", \"Littérature romane_D\", \"Littérature_D\"], \"Management et administration_D\"", "label_dic.items(): if value < 100: #print(\\\"%s : %s\\\" % (key, value)) small_classes.append(key) for", "filnames X_train_al, X_test_al, y_train_al, y_test_al, z_train_al, z_test_al = train_test_split(textset, all_labels_reduced, identset, test_size =", "# speichert train- und testset von all_labels in csv-Dateien with open(datasets+'/all_labels_trainset.csv', 'w', newline='',", "= document.split(\";\", 4)[3].strip() #print(\"tmp_text:\", tmp_text) tmp_label1 = tmp_label1.lower().replace('\"', '').strip().split(\",\") tmp_label1 = [x.strip()+'_d' for", "key, value in sorted(all_labels_reduced_dic.items()): bplr.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" %", "[\"Études environnementales_D\", \"Géographie_D\", \"Études urbaines_D\"], \"Histoire et archéologie_D\" : [\"Archéologie_D\", \"Histoire_D\", \"Histoire et", "into train and test sets import matplotlib.pyplot as plt # module for visualization", "newline='', encoding=\"utf-8\") as test_al_csv: test_al = csv.writer(test_al_csv, delimiter = \";\") test_al.writerow([\"filename\", \"classes\", \"text\"])", "texts in zip(identset, reduced_labels_reduced, textset): labellist = \", \".join(labels) textlist = \" \".join(texts)", "labels, texts in zip(identset, reduced_labels_reduced, textset): labellist = \", \".join(labels) textlist = \"", "tmp_label = [x.strip() for x in tmp_label] ident.append(tmp_ident) label.append(tmp_label) text.append(tmp_text) for key, value", "= csv.writer(train_rl_csv, delimiter = \";\") train_rl.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in", "print(identset[1000]) print(labelset[1000]) print(textset[1000]) print(len(identset)) print(len(labelset)) 
print(len(textset)) # In[5]: def get_label_dic(y): \"\"\"Erstellt ein dictionary", "services de santé_D\", \"Sciences et pratiques des soins_D\", \"Biomédecine_D\", \"Toxicomanie_D\"], \"Sciences de l'information", "= tmp_label2.lower().replace('\"', '').strip().split(\",\") tmp_label2 = [x.strip()+'_t' for x in tmp_label2] tmp_label.extend(tmp_label2) #tmp_label =", "# # Erstellung des Korpus reduced_labels # In[12]: # themes themes_dic = {\"Afrique_T\"", "X_train_to, X_test_to, y_train_to, y_test_to, z_train_to, z_test_to = train_test_split(textset, themes_only_reduced, identset, test_size = 0.20,", "labellist = \", \".join(labels) textlist = \" \".join(texts) test_to.writerow([ident, labellist, textlist]) # #", "3)[2]) #tmp_text = document.split(\";\", 3)[2] #print(\"tmp_text:\", tmp_text) else: tmp_label1 = document.split(\";\", 2)[1].strip() #print(\"tmp_label1:\",", "= [x.strip()+'_t' for x in tmp_label3] tmp_label.extend(tmp_label3) #print(\"Sonderfall:\", tmp_ident, tmp_label) tmp_text, vocab =", "mit weniger als 100 Texten:\", len(small_classes_all_labels)) print(\"Klassen insgesammt (reduziert):\", len(all_labels_reduced_dic)) # schreibt die", "des eingelesenen Dokuments in die drei Listen 1) filnames 2) labels 3) text", "Texten:\", len(small_classes_disciplines_only)) print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt (reduziert):\", len(disciplines_only_reduced_dic)) # schreibt die", "ident, labels, texts in zip(z_test_rl, y_test_rl, X_test_rl): labellist = \", \".join(labels) textlist =", "open(folder+'/blogposts_per_disciplines_only_reduced.txt',\"w\", encoding=\"utf8\") as dor: for key, value in sorted(disciplines_only_reduced_dic.items()): dor.write(\"%s : %s\\n\" %", "et politique sociale_D\" : [\"Études des relations interethniques_D\", \"Études sur la famille_D\", \"Questions", "du Sud-Est_T\", \"Extrême Orient_T\", \"Chine_T\", \"Japon_T\", \"Monde indien_T\", \"Monde persan_T\", \"Moyen-Orient_T\", \"Proche-Orient_T\"], 
\"Droit_T\"", "'').replace(\"l'\", '').split(' ') blacklist = ['et', 'du', 'études', 'de', 'des', 'la', 'dict_keys'] for", "else: idents.append(ident) labels.append(label) texts.append(text) return idents, labels, texts identset, disciplines_only_reduced, textset = delete_blog(identset,", "in csv-Dateien with open(datasets+'/themes_only_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_to_csv: train_to = csv.writer(train_to_csv, delimiter", "z = filnames X_train_to, X_test_to, y_train_to, y_test_to, z_train_to, z_test_to = train_test_split(textset, themes_only_reduced, identset,", "train_do.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_train_do, y_train_do, X_train_do): labellist =", "\"\"\"Erstellt ein dictionary zur Anzahl der Blogbeiträge pro Label (Label : Anzahl der", "# schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei # (all_labels reduziert", "texts identset, disciplines_only_reduced, textset = delete_blog(identset, disciplines_only_reduced, textset) # In[30]: # splittet all_labels", "et administration_D\"], \"Pluridisciplinarité_D\" : [\"Sciences sociales interdisciplinaires_D\"], \"Psychiatrie_D\" : [\"Psychiatrie_D\"], \"Psychologie_D\" : [\"Psychologie", "la santé et de la santé publique_D\" : [\"Éthique_D\", \"Politique et services de", "die Anzahl der Blogbeiträge pro Label in txt-Datei # (disciplines_only reduziert auf Labels", "féministes_D\"], \"Travail social et politique sociale_D\" : [\"Études des relations interethniques_D\", \"Études sur", "labellist = \", \".join(labels) textlist = \" \".join(texts) test_al.writerow([ident, labellist, textlist]) # #", "print(\"%s: %s\" % (key, value)) # In[23]: # schreibt filename, classes, text von", "\"Études anciennes_D\", \"Études culturelles_D\", \"Folklore_D\", \"Humanités pluridisciplinaires_D\", \"Musique_D\", \"Philosophie_D\", \"Religions_D\"], \"bibliothéconomie_D\" : [\"Communication_D\",", "(all_labels reduziert auf Labels mit mehr als 100 
Blogbeiträgen) with open(folder+'/blogposts_per_all_labels_reduced.txt',\"w\", encoding=\"utf8\") as", "\"Travail social et politique sociale_D\" : [\"Études des relations interethniques_D\", \"Études sur la", "ibérique_T\", \"Suisse_T\"], \"Géographie_T\" : [\"Épistémologie & histoire de la géographie_T\", \"Espace_T\", \"société et", "doesn't load the corpus into memory! def __iter__(self): openfile = open(self.file, 'r', encoding='utf-8')", "== key: tmp_all_labels.append(element) tmp_disciplines.append(element) #print(\"\\nDiscipline key:\", element) elif element in value: tmp_all_labels.append(key) tmp_disciplines.append(key)", "in label_dic.items(): if value < 100: #print(\\\"%s : %s\\\" % (key, value)) small_classes.append(key)", "folder+'/de_labeled_corpus.csv' # Hypotheses-Blogposts: filename;classes;text if not os.path.exists(datasets): os.makedirs(datasets) class MyCorpus(object): \"\"\" Preprocessing des", "zip(identset, labelset, textset): if ident.startswith('aes_'): continue else: idents.append(ident) labels.append(label) texts.append(text) return idents, labels,", "Label in txt-Datei # (themes_only nicht reduziert) with open(folder+'/blogposts_per_theme.txt',\"w\", encoding=\"utf8\") as to: for", "Blogbeiträge pro Label in txt-Datei # (disciplines_only nicht reduziert) with open(folder+'/blogposts_per_discipline.txt',\"w\", encoding=\"utf8\") as", "rurale_T\", \"Géographie urbaine_T\", \"Migrations_T\", \"immigrations_T\", \"minorités_T\", \"Nature_T\", \"paysage et environnement_T\", \"Systèmes_T\", \"modélisation_T\", \"géostatistiques_T\"],", "\"Europe centrale et orientale_T\", \"Mondes russes et soviétiques_T\", \"France_T\", \"Îles britanniques_T\", \"Italie_T\", \"Méditerranée_T\",", "return idents, labels, texts identset, disciplines_only_reduced, textset = delete_blog(identset, disciplines_only_reduced, textset) # In[30]:", "% (key, value)) print(\"%s: %s\" % (key, value)) # In[28]: # schreibt filename,", "3)[2].strip() 
#print(\"tmp_label2:\", tmp_label2) tmp_text, vocab = self.normalize_text(document.split(\";\", 4)[3]) #tmp_text = document.split(\";\", 4)[3].strip() #print(\"tmp_text:\",", "v: for key, value in sorted(vocab.items()): v.write(\"%s : %s\\n\" % (key, value)) #", "value)) print(len(list(themes_dic))) # In[13]: disciplines_dic = {\"administration publique et développement_D\" : [\"Relations internationales_D\",", "print(\"Auf höchste Hierarchieebene reduzierte Themen (reduziert):\", len(themes_only_reduced_dic)) # schreibt die Anzahl der Blogbeiträge", "train_test_split(textset, themes_only_reduced, identset, test_size = 0.20, stratify=themes_only_reduced, random_state=42) # In[25]: # speichert train-,", "1 else: vocab[word] = 1 \"\"\" # read stopwords words = [] stopwords", "[\"Études des relations interethniques_D\", \"Études sur la famille_D\", \"Questions sociales_D\", \"Travail social_D\"]} disciplines_dic", "word in tokens: if word in vocab: vocab[word] += 1 else: vocab[word] =", "open(folder+'/blogposts_per_all_labels_reduced.txt',\"w\", encoding=\"utf8\") as bplr: for key, value in sorted(all_labels_reduced_dic.items()): bplr.write(\"%s : %s\\n\" %", "de la santé_T\", \"Sociologie du travail_T\", \"Sociologie économique_T\", \"Sociologie urbaine_T\", \"Sport et loisirs_T\"]}", "bplr: for key, value in sorted(all_labels_reduced_dic.items()): bplr.write(\"%s : %s\\n\" % (key, value)) print(\"%s:", ": [\"Anthropologie culturelle_T\", \"Anthropologie politique_T\", \"Anthropologie religieuse_T\", \"Anthropologie sociale_T\"], \"Asie_T\" : [\"Asie centrale_T\",", "datasets = '../Datasets' file = folder+'/de_labeled_corpus.csv' # Hypotheses-Blogposts: filename;classes;text if not os.path.exists(datasets): os.makedirs(datasets)", "'–', '—', '­', '„', '“', '■', '•', '§', '$', '@', '€', '&', '%',", "for i, document in enumerate(documents[1:]): tmp_ident = document.split(\";\", 1)[0] #print(\"tmp_ident:\", tmp_ident) tmp_label =", "# In[11]: # speichert train- und 
testset von all_labels in csv-Dateien with open(datasets+'/all_labels_trainset.csv',", "Anzahl der Blogbeiträge pro Label in txt-Datei # (all_labels nicht reduziert) with open(folder+'/blogposts_per_all_labels.txt',\"w\",", "\"immigrations_T\", \"minorités_T\", \"Nature_T\", \"paysage et environnement_T\", \"Systèmes_T\", \"modélisation_T\", \"géostatistiques_T\"], \"Histoire_T\" : [\"Histoire des", "# # Erstellung des Korpus themes_only # In[21]: # themes_only dic themes_only_dic =", "# disciplines_only dic disciplines_only_dic = get_label_dic(disciplines_only) print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt:\", len(disciplines_only_dic))", "\"Relations internationales_T\", \"Sciences politiques_T\", \"Sociologie politique_T\"], \"Études urbaines_T\" : [\"Études urbaines_T\"], \"Europe_T\" :", "{k.lower(): [i.lower() for i in v] for k, v in disciplines_dic.items()} print(\"DISCIPLINES:\") for", ": [\"Criminologie_D\", \"Droit_D\"], \"Économie_D\" : [\"Commerce et affaires_D\", \"Économie_D\", \"Finance_D\"], \"Éducation_D\" : [\"Éducation", "value)) print(\"%s: %s\" % (key, value)) # In[28]: # schreibt filename, classes, text", "Zeile des eingelesenen Dokuments in die drei Listen 1) filnames 2) labels 3)", "des relations interethniques_D\", \"Études sur la famille_D\", \"Questions sociales_D\", \"Travail social_D\"]} disciplines_dic =", "= {k.lower(): [i.lower() for i in v] for k, v in themes_dic.items()} print(\"THEMES:\")", "y = labels, z = filnames X_train_rl, X_test_rl, y_train_rl, y_test_rl, z_train_rl, z_test_rl =", "element in elements: #print(\"\\nLabel:\", element) # themes for key, value in themes_dic.items(): if", "\"Asie du Sud-Est_T\", \"Extrême Orient_T\", \"Chine_T\", \"Japon_T\", \"Monde indien_T\", \"Monde persan_T\", \"Moyen-Orient_T\", \"Proche-Orient_T\"],", "In[10]: # splittet all_labels in train- und testset # x = text, y", "y themes = [] disciplines = [] for i, elements in enumerate(y): tmp_all_labels", "Labels mit mehr als 
100 Blogbeiträgen) with open(folder+'/blogposts_per_themes_only_reduced.txt',\"w\", encoding=\"utf8\") as tor: for key,", "\"Sociologie de la culture_T\", \"Sociologie de la santé_T\", \"Sociologie du travail_T\", \"Sociologie économique_T\",", "des sciences_T\"], \"Études du politique_T\" : [\"Guerres_T\", \"conflits_T\", \"violence_T\", \"Génocides et massacres_T\", \"Histoire", "reduced (<100) themes_only_reduced, small_classes_themes_only = remove_small_classes(themes_only, themes_only_dic) themes_only_reduced_dic = get_label_dic(themes_only_reduced) print(\"Klassen mit weniger", "iter:\", text) yield text # preprocessing #========================== # convert text to lower-case, remove", "développement_D\" : [\"Études environnementales_D\", \"Géographie_D\", \"Études urbaines_D\"], \"Histoire et archéologie_D\" : [\"Archéologie_D\", \"Histoire_D\",", "die drei Listen 1) filnames 2) labels 3) text \"\"\" ident = []", "du livre_T\", \"Sciences de l'information_T\"], \"Langage_T\" : [\"Linguistique_T\", \"Littératures_T\"], \"Moyen Âge_T\" : [\"Bas", "delete_blog(identset, labelset, textset): idents = [] labels = [] texts = [] for", "print(labelset[1000]) print(textset[1000]) print(len(identset)) print(len(labelset)) print(len(textset)) # In[5]: def get_label_dic(y): \"\"\"Erstellt ein dictionary zur", "interethniques_D\", \"Études sur la famille_D\", \"Questions sociales_D\", \"Travail social_D\"]} disciplines_dic = {k.lower(): [i.lower()", "disciplines.append(list(set(tmp_disciplines))) #print(\"\\nnew labelset:\", labels) return labels, themes, disciplines reduced_labels, themes_only, disciplines_only = reduce_labels(labelset)", "in disciplines_dic.items(): print(\"%s: %s\" % (key, value)) print(len(list(disciplines_dic))) # In[14]: # reduce labels", "social_D\"]} disciplines_dic = {k.lower(): [i.lower() for i in v] for k, v in", "v] for k, v in themes_dic.items()} print(\"THEMES:\") for key, value in themes_dic.items(): print(\"%s:", "\"1989 à de nos 
jours_T\", \"XXIe siècle_T\"], \"Époque moderne_T\" : [\"Révolution française_T\", \"XVIe", "[\"Bas Moyen Âge_T\", \"Haut Moyen Âge_T\"], \"Océanie_T\" : [\"Océanie_T\"], \"Pensée_T\" : [\"Histoire intellectuelle_T\",", "Für den Blog des Archivs der Erzdiözese Salzburg (aes) wurden keine Disziplinen gewählt,", "[\"Sciences sociales interdisciplinaires_D\"], \"Psychiatrie_D\" : [\"Psychiatrie_D\"], \"Psychologie_D\" : [\"Psychologie appliquée_D\", \"Psychologie biologique_D\", \"Psychologie", "\"Cône sud_T\", \"Mexique et Amérique centrale_T\", \"Pays andins_T\", \"Canada_T\", \"États-Unis_T\"], \"anthropologie_T\" : [\"Anthropologie", "% (i, elements)) for element in elements: #print(\"\\nLabel:\", element) # themes for key,", "\"Histoire politique_T\", \"Institutions politiques_T\", \"Mouvements politiques et sociaux_T\", \"Politiques et actions publiques_T\", \"Relations", "\";\") test_to.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_test_to, y_test_to, X_test_to): labellist", "txt-Datei # (themes_only reduziert auf Labels mit mehr als 100 Blogbeiträgen) with open(folder+'/blogposts_per_themes_only_reduced.txt',\"w\",", "textset): tow.writerow([ident, labels, texts]) # In[24]: # splittet all_labels in train- und testset", "# themes_only dic reduced (<100) themes_only_reduced, small_classes_themes_only = remove_small_classes(themes_only, themes_only_dic) themes_only_reduced_dic = get_label_dic(themes_only_reduced)", "sciences_T\", \"Sociologie des sciences_T\"], \"Études du politique_T\" : [\"Guerres_T\", \"conflits_T\", \"violence_T\", \"Génocides et", "höchste Hierarchieebene reduzierte Klassen insgesammt (reduziert):\", len(reduced_labels_reduced_dic)) # schreibt die Anzahl der Blogbeiträge", "\"Musique_D\", \"Philosophie_D\", \"Religions_D\"], \"bibliothéconomie_D\" : [\"Communication_D\", \"Sciences de l'information et bibliothéconomie_D\"], \"Droit_D\" :", "Label in txt-Datei # (reduced_labels nicht reduziert) with 
open(folder+'/blogposts_per_reduced_labels.txt',\"w\", encoding=\"utf8\") as rl: for", "z_test_rl = train_test_split(textset, reduced_labels_reduced, identset, test_size = 0.20, stratify=reduced_labels_reduced, random_state=42) # In[20]: #", "# Display the generated image: plt.imshow(wordcloud, interpolation='bilinear') plt.axis(\"off\") plt.margins(x=0, y=0) plt.show() # Save", "(Label : Anzahl der zugehörigen Blogbeiträge)\"\"\" labelcount_dic = {} #tmp_label = \", \"", "in disciplines_dic.items(): if element == key: tmp_all_labels.append(element) tmp_disciplines.append(element) #print(\"\\nDiscipline key:\", element) elif element", "\"Sciences de la santé et de la santé publique_D\" : [\"Éthique_D\", \"Politique et", "als 100 Blogbeiträgen) with open(folder+'/blogposts_per_disciplines_only_reduced.txt',\"w\", encoding=\"utf8\") as dor: for key, value in sorted(disciplines_only_reduced_dic.items()):", "Visualisierung des all_label_dics in einer Wortwolke # Create a list of word textliste=str(all_labels_dic.keys())", "höchste Hierarchieebene reduzierte Klassen insgesammt (reduziert):\", len(disciplines_only_reduced_dic)) # schreibt die Anzahl der Blogbeiträge", "value in disciplines_dic.items(): print(\"%s: %s\" % (key, value)) print(len(list(disciplines_dic))) # In[14]: # reduce", "disciplines_dic.items(): if element == key: tmp_all_labels.append(element) tmp_disciplines.append(element) #print(\"\\nDiscipline key:\", element) elif element in", "In[20]: # speichert train- und testset von all_labels in csv-Dateien with open(datasets+'/reduced_labels_trainset.csv', 'w',", "Label in txt-Datei # (disciplines_only reduziert auf Labels mit mehr als 100 Blogbeiträgen)", "visualisieren) height = list(all_labels_dic.values()) bars = list(all_labels_dic.keys()) y_pos = np.arange(len(bars)) # Create horizontal", "zip(z_test_to, y_test_to, X_test_to): labellist = \", \".join(labels) textlist = \" \".join(texts) test_to.writerow([ident, labellist,", ": %s\\n\" % (key, value)) 
# In[4]: print(identset[1000]) print(labelset[1000]) print(textset[1000]) print(len(identset)) print(len(labelset)) print(len(textset))", "# memory friendlys because doesn't load the corpus into memory! def __iter__(self): openfile", "\"Histoire économique_T\", \"Histoire industrielle_T\", \"Histoire rurale_T\", \"Histoire sociale_T\", \"Histoire urbaine_T\"], \"Information_T\" : [\"Édition", "et antiquité_T\" : [\"Égypte ancienne_T\", \"Histoire grecque_T\", \"Histoire romaine_T\", \"Monde oriental_T\", \"Préhistoire_T\"], \"Psychisme_T\"", "value)) # In[4]: print(identset[1000]) print(labelset[1000]) print(textset[1000]) print(len(identset)) print(len(labelset)) print(len(textset)) # In[5]: def get_label_dic(y):", "Korpus all_labels # In[7]: # all_labels dic all_labels_dic = get_label_dic(labelset) print(\"Klassen insgesammt:\", len(all_labels_dic))", ": [\"Histoire des sciences_T\", \"Philosophie des sciences_T\", \"Sociologie des sciences_T\"], \"Études du politique_T\"", "= {\"administration publique et développement_D\" : [\"Relations internationales_D\", \"Sciences politiques_D\", \"Administration publique_D\"], \"Arts", "% (key, value)) # In[23]: # schreibt filename, classes, text von themes_only in", "train_rl.writerow([ident, labellist, textlist]) with open(datasets+'/reduced_labels_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_rl_csv: test_rl = csv.writer(test_rl_csv,", "# coding: utf-8 # # Preprocessing der Texte # # Autorin: <NAME> #", "elif element in value: tmp_all_labels.append(key) tmp_themes.append(key) #print(\"\\nTheme:\", key) else: (\"Element nicht gefunden:\", element)", "die Klassen, denen weniger als 100 Blogbeiträge zugeordnet sind\"\"\" small_classes = [] reduced_labelset", "else: vocab[word] = 1 \"\"\" # read stopwords words = [] stopwords =", "qualitatives_T\", \"Méthodes quantitatives_T\", \"Sciences auxiliaires de l'Histoire_T\", \"Vie de la recherche_T\"], \"Époque contemporaine_T\"", "= [] tmp_disciplines = [] 
#print(\"\\nlabels in y an der Stelle %s: %s\"", "discipilnes for key, value in disciplines_dic.items(): if element == key: tmp_all_labels.append(element) tmp_disciplines.append(element) #print(\"\\nDiscipline", "auf die höchste Hierarchiestufe\"\"\" labels = [] # new y themes = []", "livre_T\", \"Sciences de l'information_T\"], \"Langage_T\" : [\"Linguistique_T\", \"Littératures_T\"], \"Moyen Âge_T\" : [\"Bas Moyen", "# In[3]: # get corpus from disk identset, labelset, textset, vocab = MyCorpus(file)", "= [] # new y themes = [] disciplines = [] for i,", "with open(datasets+'/reduced_labels_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_rl_csv: test_rl = csv.writer(test_rl_csv, delimiter = \";\")", "print(\"%s: %s\" % (key, value)) print(len(list(themes_dic))) # In[13]: disciplines_dic = {\"administration publique et", "os.makedirs(pictures) # In[32]: # Histogramm: Blogs pro all_labels (besser in excel visualisieren) height", "tmp_label.extend(tmp_label1) tmp_label2 = tmp_label2.lower().replace('\"', '').strip().split(\",\") tmp_label2 = [x.strip()+'_t' for x in tmp_label2] tmp_label.extend(tmp_label2)", "de représentation_T\", \"Méthodes qualitatives_T\", \"Méthodes quantitatives_T\", \"Sciences auxiliaires de l'Histoire_T\", \"Vie de la", "text von reduced_labels in csv-Datei with open(datasets+'/de_labeled_corpus_reduced_labels.csv', 'w', newline='', encoding=\"utf-8\") as rl_csv: rlw", "= \";\") test_to.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_test_to, y_test_to, X_test_to):", "bpl: for key, value in sorted(all_labels_dic.items()): bpl.write(\"%s : %s\\n\" % (key, value)) print(\"%s:", ": [\"Études environnementales_D\", \"Géographie_D\", \"Études urbaines_D\"], \"Histoire et archéologie_D\" : [\"Archéologie_D\", \"Histoire_D\", \"Histoire", "(<100) themes_only_reduced, small_classes_themes_only = remove_small_classes(themes_only, themes_only_dic) themes_only_reduced_dic = get_label_dic(themes_only_reduced) 
print(\"Klassen mit weniger als", "for char in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',", "et loisirs_T\"]} themes_dic = {k.lower(): [i.lower() for i in v] for k, v", "als 100 Texten:\", len(small_classes_disciplines_only)) print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt (reduziert):\", len(disciplines_only_reduced_dic)) #", "'§', '$', '@', '€', '&', '%', '&', '+', '*', '=', '#', '«', '»',", "\".join(labels) textlist = \" \".join(texts) test_rl.writerow([ident, labellist, textlist]) # # Erstellung des Korpus", "In[28]: # schreibt filename, classes, text von disciplines_only in csv-Datei with open(datasets+'/de_labeled_corpus_disciplines_only.csv', 'w',", "\".join(labels) textlist = \" \".join(texts) rlw.writerow([ident, labellist, textlist]) # In[19]: # splittet all_labels", "\"Sciences politiques_D\", \"Administration publique_D\"], \"Arts et humanités_D\" : [\"Architecture_D\", \"Arts_D\", \"Études asiatiques_D\", \"Études", "convert text to lower-case, remove punctuation and stopwords def normalize_text(self, text): \"\"\"Bereinigt den", "(key, value)) print(len(list(themes_dic))) # In[13]: disciplines_dic = {\"administration publique et développement_D\" : [\"Relations", "return labels, themes, disciplines reduced_labels, themes_only, disciplines_only = reduce_labels(labelset) # In[15]: print(reduced_labels[1000]) print(themes_only[1000])", "dic reduced (<100) themes_only_reduced, small_classes_themes_only = remove_small_classes(themes_only, themes_only_dic) themes_only_reduced_dic = get_label_dic(themes_only_reduced) print(\"Klassen mit", "elements)) for element in elements: #print(\"\\nLabel:\", element) # themes for key, value in", "'*', '=', '#', '«', '»', '≥', '<', '>', '^']: norm_text = norm_text.replace(char, '", "element in value: tmp_all_labels.append(key) tmp_themes.append(key) #print(\"\\nTheme:\", key) else: (\"Element nicht gefunden:\", element) #", "coding: utf-8 # # Preprocessing der Texte # # Autorin: <NAME> # 
In[1]:", "\"Asie_T\" : [\"Asie centrale_T\", \"Asie du Sud-Est_T\", \"Extrême Orient_T\", \"Chine_T\", \"Japon_T\", \"Monde indien_T\",", "\"Approches de corpus_T\", \"enquêtes_T\", \"archives_T\", \"Archéologie_T\", \"Cartographie_T\", \"imagerie_T\", \"SIG_T\", \"Digital humanities_T\", \"Épistémologie_T\", \"Historiographie_T\",", "else: words.append(token)\"\"\" #return words, vocab #return norm_text, vocab return tokens, vocab # split", "value: tmp_all_labels.append(key) tmp_themes.append(key) #print(\"\\nTheme:\", key) else: (\"Element nicht gefunden:\", element) # discipilnes for", "intellectuelle_T\", \"Philosophie_T\", \"Sciences cognitives_T\"], \"Préhistoire et antiquité_T\" : [\"Égypte ancienne_T\", \"Histoire grecque_T\", \"Histoire", "document.split(\";\", 4)[3].strip() #print(\"tmp_text:\", tmp_text) tmp_label1 = tmp_label1.lower().replace('\"', '').strip().split(\",\") tmp_label1 = [x.strip()+'_d' for x", "\"Afrique australe_T\", \"Afrique centrale_T\", \"Afrique de l'Est_T\", \"Afrique de l'Ouest_T\"], \"Amériques_T\" : [\"Amérique", "Anzahl der Blogbeiträge pro Label in txt-Datei # (reduced_labels reduziert auf Labels mit", "open(self.file, 'r', encoding='utf-8') # save each document as one item of a list", "'“', '”', \"´\", \"`\", \"’\", \"‘\", \"‚\",\"'\", '(', ')', '[', ']', '{', '}',", "to.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) # In[22]:", "dic all_labels_dic = get_label_dic(labelset) print(\"Klassen insgesammt:\", len(all_labels_dic)) #print(all_labels_dic) count = 0 classes =", "text in iter:\", text) yield text # preprocessing #========================== # convert text to", "\"Langage_T\" : [\"Linguistique_T\", \"Littératures_T\"], \"Moyen Âge_T\" : [\"Bas Moyen Âge_T\", \"Haut Moyen Âge_T\"],", "Stelle %s: %s\" % (i, elements)) for element in elements: #print(\"\\nLabel:\", element) #", "plt.imshow(wordcloud, interpolation='bilinear') plt.axis(\"off\") plt.margins(x=0, y=0) plt.show() # Save as SVG: 
plt.savefig(pictures+'/Blogs_per_all_labels_wordcloud.svg', format='svg') plt.savefig(pictures+'/Blogs_per_all_labels_wordcloud.png',", "[] text = [] vocabulary = {} # first row is headline for", "sur la famille_D\", \"Questions sociales_D\", \"Travail social_D\"]} disciplines_dic = {k.lower(): [i.lower() for i", "du travail_T\", \"Histoire économique_T\", \"Histoire industrielle_T\", \"Histoire rurale_T\", \"Histoire sociale_T\", \"Histoire urbaine_T\"], \"Information_T\"", "all_labels in csv-Datei with open(datasets+'/de_labeled_corpus_all_labels.csv', 'w', newline='', encoding=\"utf-8\") as al_csv: alw = csv.writer(al_csv,", "memory! def __iter__(self): openfile = open(self.file, 'r', encoding='utf-8') # save each document as", "\"‘\", \"‚\",\"'\", '(', ')', '[', ']', '{', '}', '/', '\\\\', '|', '_', '-',", "else: tmp_labels.append(element) reduced_labelset.append(tmp_labels) return reduced_labelset, small_classes # # Erstellung des Korpus all_labels #", "Blogbeiträgen) with open(folder+'/blogposts_per_themes_only_reduced.txt',\"w\", encoding=\"utf8\") as tor: for key, value in sorted(themes_only_reduced_dic.items()): tor.write(\"%s :", "'{', '}', '/', '\\\\', '|', '_', '-', '–', '—', '­', '„', '“', '■',", "textlist]) with open(datasets+'/reduced_labels_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_rl_csv: test_rl = csv.writer(test_rl_csv, delimiter =", "train_test_split(textset, disciplines_only_reduced, identset, test_size = 0.20, stratify=disciplines_only_reduced, random_state=42) # In[31]: # speichert train-,", "al_csv: alw = csv.writer(al_csv, delimiter = \";\") alw.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels,", "mit mehr als 100 Blogbeiträgen) with open(folder+'/blogposts_per_all_labels_reduced.txt',\"w\", encoding=\"utf8\") as bplr: for key, value", "% (key, value)) print(len(list(disciplines_dic))) # In[14]: # reduce labels to highest level def", "the corpus into memory! 
def __iter__(self): openfile = open(self.file, 'r', encoding='utf-8') # save", "'../Preprocessing' datasets = '../Datasets' file = folder+'/de_labeled_corpus.csv' # Hypotheses-Blogposts: filename;classes;text if not os.path.exists(datasets):", "filename, classes, text von themes_only in csv-Datei with open(datasets+'/de_labeled_corpus_themes_only.csv', 'w', newline='', encoding=\"utf-8\") as", ": [\"Bas Moyen Âge_T\", \"Haut Moyen Âge_T\"], \"Océanie_T\" : [\"Océanie_T\"], \"Pensée_T\" : [\"Histoire", "print(\"Klassen insgesammt:\", len(all_labels_dic)) #print(all_labels_dic) count = 0 classes = list(all_labels_dic) # schreibt die", "löscht kleine Klassen (<100) def remove_small_classes(labelset, label_dic): \"\"\"Löscht die Klassen, denen weniger als", "testset von themes_only in csv-Dateien with open(datasets+'/themes_only_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_to_csv: train_to", "= str(textliste).replace(\"'\", \"\") # Create the wordcloud object wordcloud = WordCloud(width=680, height=680, margin=0,", "z_test_do = train_test_split(textset, disciplines_only_reduced, identset, test_size = 0.20, stratify=disciplines_only_reduced, random_state=42) # In[31]: #", "de la vie_T\", \"Criminologie_T\", \"Démographie_T\", \"Étude des genres_T\", \"Sociologie de la consommation_T\", \"Sociologie", "Label (Label : Anzahl der zugehörigen Blogbeiträge)\"\"\" labelcount_dic = {} #tmp_label = \",", "et soviétiques_T\", \"France_T\", \"Îles britanniques_T\", \"Italie_T\", \"Méditerranée_T\", \"Monde germanique_T\", \"Pays baltes et scandinaves_T\",", "= remove_small_classes(disciplines_only, disciplines_only_dic) disciplines_only_reduced_dic = get_label_dic(disciplines_only_reduced) print(\"Klassen mit weniger als 100 Texten:\", len(small_classes_disciplines_only))", "reduced_labelset = [] for key, value in label_dic.items(): if value < 100: #print(\\\"%s", "économique_T\", \"Sociologie urbaine_T\", \"Sport et loisirs_T\"]} themes_dic = {k.lower(): 
[i.lower() for i in", "textlist]) with open(datasets+'/all_labels_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_al_csv: test_al = csv.writer(test_al_csv, delimiter =", "print(\"%s: %s\" % (key, value)) # In[9]: # schreibt filename, classes, text von", "file with open(folder+'/blogs_vocabulary.txt',\"w\", encoding=\"utf8\") as v: for key, value in sorted(vocab.items()): v.write(\"%s :", "in csv-Datei with open(datasets+'/de_labeled_corpus_disciplines_only.csv', 'w', newline='', encoding=\"utf-8\") as do_csv: dow = csv.writer(do_csv, delimiter", "rlw.writerow([ident, labellist, textlist]) # In[19]: # splittet all_labels in train- und testset #", "= csv.writer(rl_csv, delimiter = \";\") rlw.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in", "\"classes\", \"text\"]) for ident, labels, texts in zip(z_train_do, y_train_do, X_train_do): labellist = \",", "appliquée et aménagement_T\", \"Géographie rurale_T\", \"Géographie urbaine_T\", \"Migrations_T\", \"immigrations_T\", \"minorités_T\", \"Nature_T\", \"paysage et", "\"Archéologie_T\", \"Cartographie_T\", \"imagerie_T\", \"SIG_T\", \"Digital humanities_T\", \"Épistémologie_T\", \"Historiographie_T\", \"Méthodes de traitement et de", "\"Sciences de l'information et bibliothéconomie_D\"], \"Sciences politiques_D\" : [\"Relations internationales_D\", \"Sciences politiques_D\", \"Administration", "'w', newline='', encoding=\"utf-8\") as train_rl_csv: train_rl = csv.writer(train_rl_csv, delimiter = \";\") train_rl.writerow([\"filename\", \"classes\",", "# In[10]: # splittet all_labels in train- und testset # x = text,", "In[13]: disciplines_dic = {\"administration publique et développement_D\" : [\"Relations internationales_D\", \"Sciences politiques_D\", \"Administration", "themes_dic.items(): if element == key: tmp_all_labels.append(element) tmp_themes.append(element) #print(\"\\nTheme key:\", element) elif element in", "Erzdiözese Salzburg (aes) wurden keine Disziplinen gewählt, # 
dementsprechend wird dieser Blog aus", "as test_al_csv: test_al = csv.writer(test_al_csv, delimiter = \";\") test_al.writerow([\"filename\", \"classes\", \"text\"]) for ident,", "und testset von disciplines_only in csv-Dateien with open(datasets+'/disciplines_only_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_do_csv:", "\"Travail_T\", \"emploi_T\"], \"Éducation_T\" : [\"Histoire de l'éducation_T\", \"Sciences de l'éducation_T\"], \"Épistémologie et méthodes_T\":", "for element in textliste: if element in blacklist: textliste.remove(element) text = str(textliste).replace(\"'\", \"\")", "ident, labels, texts in zip(z_train_do, y_train_do, X_train_do): labellist = \", \".join(labels) textlist =", "et administration_D\" : [\"Ergonomie_D\", \"Travail et relations professionnelles_D\", \"Planification et développement_D\", \"Transports_D\", \"Management", "test_rl.writerow([ident, labellist, textlist]) # # Erstellung des Korpus themes_only # In[21]: # themes_only", "test_to_csv: test_to = csv.writer(test_to_csv, delimiter = \";\") test_to.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels,", "textset): dow.writerow([ident, labels, texts]) # In[29]: # Für den Blog des Archivs der", "= '../Preprocessing' datasets = '../Datasets' file = folder+'/de_labeled_corpus.csv' # Hypotheses-Blogposts: filename;classes;text if not", "= document.split(\";\", 1)[0] #print(\"tmp_ident:\", tmp_ident) tmp_label = [] if re.match(\"aes_\", tmp_ident): # Blog", ": [\"Psychiatrie_D\"], \"Psychologie_D\" : [\"Psychologie appliquée_D\", \"Psychologie biologique_D\", \"Psychologie clinique_D\", \"Psychologie du développement_D\",", "ein dictionary zur Anzahl der Blogbeiträge pro Label (Label : Anzahl der zugehörigen", "'2', '3', '4', '5', '6', '7', '8', '9', '.', ',', ';', ':', '!',", "'&', '%', '&', '+', '*', '=', '#', '«', '»', '≥', '<', '>', '^']:", "de la communication_D\" : [\"Communication_D\", \"Sciences de l'information et bibliothéconomie_D\"], \"Sciences 
politiques_D\" :", "internationales_T\", \"Sciences politiques_T\", \"Sociologie politique_T\"], \"Études urbaines_T\" : [\"Études urbaines_T\"], \"Europe_T\" : [\"Balkans_T\",", "administration_D\"], \"Pluridisciplinarité_D\" : [\"Sciences sociales interdisciplinaires_D\"], \"Psychiatrie_D\" : [\"Psychiatrie_D\"], \"Psychologie_D\" : [\"Psychologie appliquée_D\",", "= list(all_labels_dic.keys()) y_pos = np.arange(len(bars)) # Create horizontal bars plt.barh(y_pos, height) # Create", "sociales_D\", \"Travail social_D\"]} disciplines_dic = {k.lower(): [i.lower() for i in v] for k,", "massacres_T\", \"Histoire politique_T\", \"Institutions politiques_T\", \"Mouvements politiques et sociaux_T\", \"Politiques et actions publiques_T\",", "= remove_small_classes(themes_only, themes_only_dic) themes_only_reduced_dic = get_label_dic(themes_only_reduced) print(\"Klassen mit weniger als 100 Texten:\", len(small_classes_themes_only))", "= labels, z = filnames X_train_to, X_test_to, y_train_to, y_test_to, z_train_to, z_test_to = train_test_split(textset,", "WordCloud(width=680, height=680, margin=0, background_color=\"white\").generate(text) # Display the generated image: plt.imshow(wordcloud, interpolation='bilinear') plt.axis(\"off\") plt.margins(x=0,", "britannique_D\", \"Littérature romane_D\", \"Littérature_D\"], \"Management et administration_D\" : [\"Ergonomie_D\", \"Travail et relations professionnelles_D\",", "text von all_labels in csv-Datei with open(datasets+'/de_labeled_corpus_all_labels.csv', 'w', newline='', encoding=\"utf-8\") as al_csv: alw", "text) yield text # preprocessing #========================== # convert text to lower-case, remove punctuation", "excel visualisieren) height = list(all_labels_dic.values()) bars = list(all_labels_dic.keys()) y_pos = np.arange(len(bars)) # Create", "alw.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(identset, all_labels_reduced, textset): labellist =", "oriental_T\", \"Préhistoire_T\"], 
\"Psychisme_T\" : [\"Psychanalyse_T\", \"Psychologie_T\"], \"Religions_T\" : [\"Histoire des religions_T\", \"Sociologie des", "In[29]: # Für den Blog des Archivs der Erzdiözese Salzburg (aes) wurden keine", "orientale_T\", \"Mondes russes et soviétiques_T\", \"France_T\", \"Îles britanniques_T\", \"Italie_T\", \"Méditerranée_T\", \"Monde germanique_T\", \"Pays", "preprocessing the data folder = '../Preprocessing' datasets = '../Datasets' file = folder+'/de_labeled_corpus.csv' #", "__init__(self, file): self.file = file # memory friendlys because doesn't load the corpus", "in txt-Datei # (disciplines_only reduziert auf Labels mit mehr als 100 Blogbeiträgen) with", "print(\"%s: %s\" % (key, value)) # In[28]: # schreibt filename, classes, text von", "in Kleinschrift - löscht Satz- und Sonderzeichen\"\"\" norm_text = text.lower() # remove punctuation", "#return words, vocab #return norm_text, vocab return tokens, vocab # split identnumber, nace-code-list", "\", \".join(labels) textlist = \" \".join(texts) train_do.writerow([ident, labellist, textlist]) with open(datasets+'/disciplines_only_testset.csv', 'w', newline='',", "# Blog \"aes - <NAME>\" hat nur Thèmes: Histoire, Religions tmp_label3 = document.split(\";\",", ": [\"Études des relations interethniques_D\", \"Études sur la famille_D\", \"Questions sociales_D\", \"Travail social_D\"]}", "= \" \".join(texts) test_al.writerow([ident, labellist, textlist]) # # Erstellung des Korpus reduced_labels #", "sorted(all_labels_reduced_dic.items()): bplr.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) #", "# reduced_labels reduced dic (<100) reduced_labels_reduced, small_classes_reduced_labels = remove_small_classes(reduced_labels, reduced_labels_dic) reduced_labels_reduced_dic = get_label_dic(reduced_labels_reduced)", "nord_T\", \"Algérie_T\", \"Afrique noire_T\", \"Afrique australe_T\", \"Afrique centrale_T\", \"Afrique de l'Est_T\", \"Afrique de", "disciplines_only_reduced, textset = 
delete_blog(identset, disciplines_only_reduced, textset) # In[30]: # splittet all_labels in train-", "\";\") test_do.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_test_do, y_test_do, X_test_do): labellist", "Histoire, Religions tmp_label3 = document.split(\";\", 2)[1].strip() tmp_label3 = tmp_label3.lower().replace('\"', '').strip().split(\",\") tmp_label3 = [x.strip()+'_t'", "Âge_T\", \"Haut Moyen Âge_T\"], \"Océanie_T\" : [\"Océanie_T\"], \"Pensée_T\" : [\"Histoire intellectuelle_T\", \"Philosophie_T\", \"Sciences", "[\"Études environnementales_D\", \"Géographie_D\", \"Études urbaines_D\"], \"géographie et développement_D\" : [\"Études environnementales_D\", \"Géographie_D\", \"Études", ": Anzahl der zugehörigen Blogbeiträge)\"\"\" labelcount_dic = {} #tmp_label = \", \" for", "[] for ident, label, text in zip(identset, labelset, textset): if ident.startswith('aes_'): continue else:", "for csv output from sklearn.model_selection import train_test_split # module to split data into", "\"Algérie_T\", \"Afrique noire_T\", \"Afrique australe_T\", \"Afrique centrale_T\", \"Afrique de l'Est_T\", \"Afrique de l'Ouest_T\"],", "\"Épistémologie_T\", \"Historiographie_T\", \"Méthodes de traitement et de représentation_T\", \"Méthodes qualitatives_T\", \"Méthodes quantitatives_T\", \"Sciences", "= {\"Afrique_T\" : [\"Afrique du nord_T\", \"Algérie_T\", \"Afrique noire_T\", \"Afrique australe_T\", \"Afrique centrale_T\",", "\"Histoire industrielle_T\", \"Histoire rurale_T\", \"Histoire sociale_T\", \"Histoire urbaine_T\"], \"Information_T\" : [\"Édition électronique_T\", \"Histoire", "\"Histoire sociale_T\", \"Histoire urbaine_T\"], \"Information_T\" : [\"Édition électronique_T\", \"Histoire et sociologie de la", "[\"Océanie_T\"], \"Pensée_T\" : [\"Histoire intellectuelle_T\", \"Philosophie_T\", \"Sciences cognitives_T\"], \"Préhistoire et antiquité_T\" : [\"Égypte", "\"Extrême Orient_T\", \"Chine_T\", \"Japon_T\", \"Monde indien_T\", 
\"Monde persan_T\", \"Moyen-Orient_T\", \"Proche-Orient_T\"], \"Droit_T\" : [\"Histoire", "'+', '*', '=', '#', '«', '»', '≥', '<', '>', '^']: norm_text = norm_text.replace(char,", "\"text\"]) for ident, labels, texts in zip(z_train_to, y_train_to, X_train_to): labellist = \", \".join(labels)", "[] # new y themes = [] disciplines = [] for i, elements", "du droit_T\"], \"Économie_T\" : [\"Développement économique_T\", \"Économie politique_T\", \"Gestion_T\", \"Travail_T\", \"emploi_T\"], \"Éducation_T\" :", "# first row is headline for i, document in enumerate(documents[1:]): tmp_ident = document.split(\";\",", "\"Sociologie_T\" : [\"Âges de la vie_T\", \"Criminologie_T\", \"Démographie_T\", \"Étude des genres_T\", \"Sociologie de", "\"Politique et services de santé_D\", \"Sciences et pratiques des soins_D\", \"Biomédecine_D\", \"Toxicomanie_D\"], \"Sciences", "\"Mondes russes et soviétiques_T\", \"France_T\", \"Îles britanniques_T\", \"Italie_T\", \"Méditerranée_T\", \"Monde germanique_T\", \"Pays baltes", "In[18]: # schreibt filename, classes, text von reduced_labels in csv-Datei with open(datasets+'/de_labeled_corpus_reduced_labels.csv', 'w',", "= labels, z = filnames X_train_do, X_test_do, y_train_do, y_test_do, z_train_do, z_test_do = train_test_split(textset,", "de l'Est_T\", \"Afrique de l'Ouest_T\"], \"Amériques_T\" : [\"Amérique latine_T\", \"Brésil_T\", \"Cône sud_T\", \"Mexique", "romane_D\", \"Littérature_D\"], \"Management et administration_D\" : [\"Ergonomie_D\", \"Travail et relations professionnelles_D\", \"Planification et", "= text, y = labels, z = filnames X_train_to, X_test_to, y_train_to, y_test_to, z_train_to,", "for key, value in themes_dic.items(): if element == key: tmp_all_labels.append(element) tmp_themes.append(element) #print(\"\\nTheme key:\",", "auf Labels mit mehr als 100 Blogbeiträgen) with open(folder+'/blogposts_per_themes_only_reduced.txt',\"w\", encoding=\"utf8\") as tor: for", "# themes_only dic themes_only_dic = 
get_label_dic(themes_only) print(\"Auf höchste Hierarchieebene reduzierte Themen:\", len(themes_only_dic)) #", "themes_only dic themes_only_dic = get_label_dic(themes_only) print(\"Auf höchste Hierarchieebene reduzierte Themen:\", len(themes_only_dic)) # schreibt", "y_test_rl, z_train_rl, z_test_rl = train_test_split(textset, reduced_labels_reduced, identset, test_size = 0.20, stratify=reduced_labels_reduced, random_state=42) #", "\"text\"]) for ident, labels, texts in zip(z_test_to, y_test_to, X_test_to): labellist = \", \".join(labels)", ": [\"Archéologie_D\", \"Histoire_D\", \"Histoire et philosophie des sciences_D\", \"Histoire des sciences sociales_D\"], \"Langue", "\", \".join(labels) textlist = \" \".join(texts) train_to.writerow([ident, labellist, textlist]) with open(datasets+'/themes_only_testset.csv', 'w', newline='',", "# x = text, y = labels, z = filnames X_train_do, X_test_do, y_train_do,", "with open(datasets+'/themes_only_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_to_csv: test_to = csv.writer(test_to_csv, delimiter = \";\")", "Dokuments in die drei Listen 1) filnames 2) labels 3) text \"\"\" ident", "element in elements: if element in small_classes: continue else: tmp_labels.append(element) reduced_labelset.append(tmp_labels) return reduced_labelset,", "[x.strip()+'_t' for x in tmp_label2] tmp_label.extend(tmp_label2) #tmp_label = (tmp_label1 + tmp_label2) #print(tmp_label) tmp_label", "y_train_do, y_test_do, z_train_do, z_test_do = train_test_split(textset, disciplines_only_reduced, identset, test_size = 0.20, stratify=disciplines_only_reduced, random_state=42)", "des religions_T\"], \"Représentations_T\" : [\"Architecture_T\", \"Études visuelles_T\", \"Histoire culturelle_T\", \"Histoire de l'Art_T\", \"Identités", "textlist = \" \".join(texts) test_al.writerow([ident, labellist, textlist]) # # Erstellung des Korpus reduced_labels", "element == key: tmp_all_labels.append(element) tmp_themes.append(element) #print(\"\\nTheme key:\", 
element) elif element in value: tmp_all_labels.append(key)", "for ident, labels, texts in zip(z_test_rl, y_test_rl, X_test_rl): labellist = \", \".join(labels) textlist", "classes, text von disciplines_only in csv-Datei with open(datasets+'/de_labeled_corpus_disciplines_only.csv', 'w', newline='', encoding=\"utf-8\") as do_csv:", "':', '!', '?', '…','·', '·', '\"', '„', '“', '”', \"´\", \"`\", \"’\", \"‘\",", "preprocessing #========================== # convert text to lower-case, remove punctuation and stopwords def normalize_text(self,", "- löscht Satz- und Sonderzeichen\"\"\" norm_text = text.lower() # remove punctuation for char", "économique_T\", \"Économie politique_T\", \"Gestion_T\", \"Travail_T\", \"emploi_T\"], \"Éducation_T\" : [\"Histoire de l'éducation_T\", \"Sciences de", "with open(folder+'/blogposts_per_reduced_labels.txt',\"w\", encoding=\"utf8\") as rl: for key, value in sorted(reduced_labels_dic.items()): rl.write(\"%s : %s\\n\"", "tmp_label3 = document.split(\";\", 2)[1].strip() tmp_label3 = tmp_label3.lower().replace('\"', '').strip().split(\",\") tmp_label3 = [x.strip()+'_t' for x", "stopwords def normalize_text(self, text): \"\"\"Bereinigt den Text: - transformiert alles in Kleinschrift -", "key: tmp_all_labels.append(element) tmp_themes.append(element) #print(\"\\nTheme key:\", element) elif element in value: tmp_all_labels.append(key) tmp_themes.append(key) #print(\"\\nTheme:\",", "= MyCorpus(file) # save vocabulary to file with open(folder+'/blogs_vocabulary.txt',\"w\", encoding=\"utf8\") as v: for", "\"Histoire du travail_T\", \"Histoire économique_T\", \"Histoire industrielle_T\", \"Histoire rurale_T\", \"Histoire sociale_T\", \"Histoire urbaine_T\"],", "\".join(labels) textlist = \" \".join(texts) test_to.writerow([ident, labellist, textlist]) # # Erstellung des Korpus", "politiques_D\", \"Administration publique_D\"], \"Arts et humanités_D\" : [\"Architecture_D\", \"Arts_D\", \"Études asiatiques_D\", \"Études anciennes_D\",", "'$', 
'@', '€', '&', '%', '&', '+', '*', '=', '#', '«', '»', '≥',", "generated image: plt.imshow(wordcloud, interpolation='bilinear') plt.axis(\"off\") plt.margins(x=0, y=0) plt.show() # Save as SVG: plt.savefig(pictures+'/Blogs_per_all_labels_wordcloud.svg',", "# schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei # (themes_only nicht", "enumerate(documents[1:]): tmp_ident = document.split(\";\", 1)[0] #print(\"tmp_ident:\", tmp_ident) tmp_label = [] if re.match(\"aes_\", tmp_ident):", "rl: for key, value in sorted(reduced_labels_dic.items()): rl.write(\"%s : %s\\n\" % (key, value)) print(\"%s:", "= open(folder+'/german_stopwords_plain.txt', 'r').read() for token in tokens: #if token in stopWords: if token", "identset, test_size = 0.20, stratify=reduced_labels_reduced, random_state=42) # In[20]: # speichert train- und testset", "'_', '-', '–', '—', '­', '„', '“', '■', '•', '§', '$', '@', '€',", "for i, elements in enumerate(y): tmp_all_labels = [] tmp_themes = [] tmp_disciplines =", "\" \".join(texts) test_al.writerow([ident, labellist, textlist]) # # Erstellung des Korpus reduced_labels # In[12]:", "# In[15]: print(reduced_labels[1000]) print(themes_only[1000]) print(disciplines_only[1000]) # In[16]: # reduced_labels dic reduced_labels_dic = get_label_dic(reduced_labels)", "labels = [] texts = [] for ident, label, text in zip(identset, labelset,", "corporate purpose and save in lists def split_csv(self, documents): \"\"\"Splittet jede Zeile des", "# (themes_only nicht reduziert) with open(folder+'/blogposts_per_theme.txt',\"w\", encoding=\"utf8\") as to: for key, value in", "religions_T\"], \"Représentations_T\" : [\"Architecture_T\", \"Études visuelles_T\", \"Histoire culturelle_T\", \"Histoire de l'Art_T\", \"Identités culturelles_T\",", "# Erstellung des Korpus reduced_labels # In[12]: # themes themes_dic = {\"Afrique_T\" :", "[\"Architecture_D\", \"Arts_D\", \"Études asiatiques_D\", \"Études anciennes_D\", \"Études culturelles_D\", \"Folklore_D\", 
\"Humanités pluridisciplinaires_D\", \"Musique_D\", \"Philosophie_D\",", "for l in label: if l in labelcount_dic: labelcount_dic[l] += 1 else: labelcount_dic[l]", "(reduced_labels reduziert auf Labels mit mehr als 100 Blogbeiträgen) with open(folder+'/blogposts_per_reduced_labels_reduced.txt',\"w\", encoding=\"utf8\") as", "corpus from disk identset, labelset, textset, vocab = MyCorpus(file) # save vocabulary to", "disciplines_only_reduced, identset, test_size = 0.20, stratify=disciplines_only_reduced, random_state=42) # In[31]: # speichert train-, validation-", "Create the wordcloud object wordcloud = WordCloud(width=680, height=680, margin=0, background_color=\"white\").generate(text) # Display the", "classes = list(all_labels_dic) # schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei", "textliste.remove(element) text = str(textliste).replace(\"'\", \"\") # Create the wordcloud object wordcloud = WordCloud(width=680,", "[x.strip() for x in tmp_label] ident.append(tmp_ident) label.append(tmp_label) text.append(tmp_text) for key, value in vocab.items():", "[] tmp_themes = [] tmp_disciplines = [] #print(\"\\nlabels in y an der Stelle", "insgesammt (reduziert):\", len(all_labels_reduced_dic)) # schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei", "\"Criminologie_T\", \"Démographie_T\", \"Étude des genres_T\", \"Sociologie de la consommation_T\", \"Sociologie de la culture_T\",", "\"text\"]) for ident, labels, texts in zip(identset, all_labels_reduced, textset): labellist = \", \".join(labels)", "[\"Âges de la vie_T\", \"Criminologie_T\", \"Démographie_T\", \"Étude des genres_T\", \"Sociologie de la consommation_T\",", "if not os.path.exists(pictures): os.makedirs(pictures) # In[32]: # Histogramm: Blogs pro all_labels (besser in", "txt-Datei # (themes_only nicht reduziert) with open(folder+'/blogposts_per_theme.txt',\"w\", encoding=\"utf8\") as to: for key, value", "[\"Anthropologie culturelle_T\", \"Anthropologie politique_T\", \"Anthropologie 
religieuse_T\", \"Anthropologie sociale_T\"], \"Études des sciences_T\" : [\"Histoire", "identset, test_size = 0.20, stratify=themes_only_reduced, random_state=42) # In[25]: # speichert train-, validation- und", "tmp_ident = document.split(\";\", 1)[0] #print(\"tmp_ident:\", tmp_ident) tmp_label = [] if re.match(\"aes_\", tmp_ident): #", "presse_T\", \"Histoire et sociologie des médias_T\", \"Histoire et sociologie du livre_T\", \"Sciences de", "Labels mit mehr als 100 Blogbeiträgen) with open(folder+'/blogposts_per_all_labels_reduced.txt',\"w\", encoding=\"utf8\") as bplr: for key,", "dor: for key, value in sorted(disciplines_only_reduced_dic.items()): dor.write(\"%s : %s\\n\" % (key, value)) print(\"%s:", "\"Histoire romaine_T\", \"Monde oriental_T\", \"Préhistoire_T\"], \"Psychisme_T\" : [\"Psychanalyse_T\", \"Psychologie_T\"], \"Religions_T\" : [\"Histoire des", "for ident, labels, texts in zip(identset, disciplines_only, textset): dow.writerow([ident, labels, texts]) # In[29]:", "disciplines_only in csv-Dateien with open(datasets+'/disciplines_only_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_do_csv: train_do = csv.writer(train_do_csv,", "\"Représentations_T\" : [\"Architecture_T\", \"Études visuelles_T\", \"Histoire culturelle_T\", \"Histoire de l'Art_T\", \"Identités culturelles_T\", \"Patrimoine_T\"],", "\"1918-1939_T\", \"1939-1945_T\", \"1945-1989_T\", \"1989 à de nos jours_T\", \"XXIe siècle_T\"], \"Époque moderne_T\" :", "(key, value)) # In[27]: # disciplines_only dic reduced (<100) disciplines_only_reduced, small_classes_disciplines_only = remove_small_classes(disciplines_only,", "vocab[word] = 1 \"\"\" # read stopwords words = [] stopwords = open(folder+'/german_stopwords_plain.txt',", "all_labels dic all_labels_dic = get_label_dic(labelset) print(\"Klassen insgesammt:\", len(all_labels_dic)) #print(all_labels_dic) count = 0 classes", "element) elif element in value: tmp_all_labels.append(key) tmp_disciplines.append(key) 
#print(\"\\nDiscipline:\", key) else: (\"Element nicht gefunden:\",", "In[1]: # Imports import os import numpy as np import re # module", "andins_T\", \"Canada_T\", \"États-Unis_T\"], \"anthropologie_T\" : [\"Anthropologie culturelle_T\", \"Anthropologie politique_T\", \"Anthropologie religieuse_T\", \"Anthropologie sociale_T\"],", "persan_T\", \"Moyen-Orient_T\", \"Proche-Orient_T\"], \"Droit_T\" : [\"Histoire du droit_T\", \"Sociologie du droit_T\"], \"Économie_T\" :", "delimiter = \";\") test_al.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_test_al, y_test_al,", "\"XVIIIe siècle_T\"], \"Ethnologie_T\" : [\"Anthropologie culturelle_T\", \"Anthropologie politique_T\", \"Anthropologie religieuse_T\", \"Anthropologie sociale_T\"], \"Études", "tor.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) # In[23]:", "print(\"Klassen mit weniger als 100 Texten:\", len(small_classes_reduced_labels)) print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt", "newline='', encoding=\"utf-8\") as to_csv: tow = csv.writer(to_csv, delimiter = \";\") tow.writerow([\"filename\", \"classes\", \"text\"])", "'w', newline='', encoding=\"utf-8\") as do_csv: dow = csv.writer(do_csv, delimiter = \";\") dow.writerow([\"filename\", \"classes\",", "géographie_T\", \"Espace_T\", \"société et territoire_T\", \"Géographie : politique_T\", \"culture et représentation_T\", \"Géographie appliquée", "'w', newline='', encoding=\"utf-8\") as rl_csv: rlw = csv.writer(rl_csv, delimiter = \";\") rlw.writerow([\"filename\", \"classes\",", "= np.arange(len(bars)) # Create horizontal bars plt.barh(y_pos, height) # Create names on the", "in txt-Datei # (all_labels reduziert auf Labels mit mehr als 100 Blogbeiträgen) with", "= train_test_split(textset, reduced_labels_reduced, identset, test_size = 0.20, stratify=reduced_labels_reduced, random_state=42) # In[20]: # speichert", "texts in zip(z_test_al, y_test_al, X_test_al): labellist = \", 
\".join(labels) textlist = \" \".join(texts)", "des sciences sociales_D\"], \"Langue et linguistique_D\" : [\"Linguistique appliquée_D\", \"Théorie du langage et", "def __init__(self, file): self.file = file # memory friendlys because doesn't load the", "in sorted(all_labels_dic.items()): bpl.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value))", "\" \".join(texts) test_rl.writerow([ident, labellist, textlist]) # # Erstellung des Korpus themes_only # In[21]:", "\"Méthodes de traitement et de représentation_T\", \"Méthodes qualitatives_T\", \"Méthodes quantitatives_T\", \"Sciences auxiliaires de", "'≥', '<', '>', '^']: norm_text = norm_text.replace(char, ' ') tokens = norm_text.split() vocab", "et massacres_T\", \"Histoire politique_T\", \"Institutions politiques_T\", \"Mouvements politiques et sociaux_T\", \"Politiques et actions", "(key, value)) # In[23]: # schreibt filename, classes, text von themes_only in csv-Datei", ": %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value)) # In[27]: #", "open(datasets+'/reduced_labels_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_rl_csv: test_rl = csv.writer(test_rl_csv, delimiter = \";\") test_rl.writerow([\"filename\",", "if token in stopwords: continue else: words.append(token)\"\"\" #return words, vocab #return norm_text, vocab", "Korpus \"\"\" # file: input data # def __init__(self, file, x, y): def", "(key, value)) print(\"%s: %s\" % (key, value)) # In[27]: # disciplines_only dic reduced", "professionnelles_D\", \"Planification et développement_D\", \"Transports_D\", \"Management et administration_D\"], \"Pluridisciplinarité_D\" : [\"Sciences sociales interdisciplinaires_D\"],", "\"1945-1989_T\", \"1989 à de nos jours_T\", \"XXIe siècle_T\"], \"Époque moderne_T\" : [\"Révolution française_T\",", "purpose and save in lists def split_csv(self, documents): \"\"\"Splittet jede Zeile des eingelesenen", "= csv.writer(al_csv, delimiter = \";\") alw.writerow([\"filename\", \"classes\", \"text\"]) 
for ident, labels, texts in", "filename, classes, text von all_labels in csv-Datei with open(datasets+'/de_labeled_corpus_all_labels.csv', 'w', newline='', encoding=\"utf-8\") as", "de nos jours_T\", \"XXIe siècle_T\"], \"Époque moderne_T\" : [\"Révolution française_T\", \"XVIe siècle_T\", \"XVIIe", "centrale_T\", \"Pays andins_T\", \"Canada_T\", \"États-Unis_T\"], \"anthropologie_T\" : [\"Anthropologie culturelle_T\", \"Anthropologie politique_T\", \"Anthropologie religieuse_T\",", "communication_D\" : [\"Communication_D\", \"Sciences de l'information et bibliothéconomie_D\"], \"Sciences politiques_D\" : [\"Relations internationales_D\",", "encoding=\"utf-8\") as test_do_csv: test_do = csv.writer(test_do_csv, delimiter = \";\") test_do.writerow([\"filename\", \"classes\", \"text\"]) for", "dementsprechend wird dieser Blog aus disciplines_only entfernt def delete_blog(identset, labelset, textset): idents =", "\"Proche-Orient_T\"], \"Droit_T\" : [\"Histoire du droit_T\", \"Sociologie du droit_T\"], \"Économie_T\" : [\"Développement économique_T\",", "print(\"Klassen mit weniger als 100 Texten:\", len(small_classes_all_labels)) print(\"Klassen insgesammt (reduziert):\", len(all_labels_reduced_dic)) # schreibt", "(key, value)) small_classes.append(key) for elements in labelset: tmp_labels = [] for element in", "et développement_D\" : [\"Études environnementales_D\", \"Géographie_D\", \"Études urbaines_D\"], \"Histoire et archéologie_D\" : [\"Archéologie_D\",", "\"Philosophie des sciences_T\", \"Sociologie des sciences_T\"], \"Études du politique_T\" : [\"Guerres_T\", \"conflits_T\", \"violence_T\",", "sociale_T\", \"Histoire urbaine_T\"], \"Information_T\" : [\"Édition électronique_T\", \"Histoire et sociologie de la presse_T\",", "bibliothéconomie_D\"], \"Sciences politiques_D\" : [\"Relations internationales_D\", \"Sciences politiques_D\", \"Administration publique_D\"], \"Sociologie et anthropologie_D\"", "\"\") # Create the wordcloud object wordcloud = 
WordCloud(width=680, height=680, margin=0, background_color=\"white\").generate(text) #", "Display the generated image: plt.imshow(wordcloud, interpolation='bilinear') plt.axis(\"off\") plt.margins(x=0, y=0) plt.show() # Save as", "[\"Psychanalyse_T\", \"Psychologie_T\"], \"Religions_T\" : [\"Histoire des religions_T\", \"Sociologie des religions_T\"], \"Représentations_T\" : [\"Architecture_T\",", "\".join(labels) textlist = \" \".join(texts) test_do.writerow([ident, labellist, textlist]) # # Visualisierungen # In[", "de la santé et de la santé publique_D\" : [\"Éthique_D\", \"Politique et services", "labelcount_dic # In[6]: # löscht kleine Klassen (<100) def remove_small_classes(labelset, label_dic): \"\"\"Löscht die", "texts]) # In[29]: # Für den Blog des Archivs der Erzdiözese Salzburg (aes)", "i, document in enumerate(documents[1:]): tmp_ident = document.split(\";\", 1)[0] #print(\"tmp_ident:\", tmp_ident) tmp_label = []", "\"Monde indien_T\", \"Monde persan_T\", \"Moyen-Orient_T\", \"Proche-Orient_T\"], \"Droit_T\" : [\"Histoire du droit_T\", \"Sociologie du", "\"´\", \"`\", \"’\", \"‘\", \"‚\",\"'\", '(', ')', '[', ']', '{', '}', '/', '\\\\',", "zip(identset, themes_only, textset): tow.writerow([ident, labels, texts]) # In[24]: # splittet all_labels in train-", "key, value in vocab.items(): if key in vocabulary: vocabulary[key] += value else: vocabulary[key]", "open(datasets+'/themes_only_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_to_csv: train_to = csv.writer(train_to_csv, delimiter = \";\") train_to.writerow([\"filename\",", "blacklist = ['et', 'du', 'études', 'de', 'des', 'la', 'dict_keys'] for element in textliste:", "#print(tmp_label) tmp_label = [x.strip() for x in tmp_label] ident.append(tmp_ident) label.append(tmp_label) text.append(tmp_text) for key,", "\";\") test_al.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_test_al, y_test_al, X_test_al): labellist", "value)) print(\"%s: %s\" % (key, 
value)) # In[17]: # reduced_labels reduced dic (<100)", "labellist, textlist]) # In[10]: # splittet all_labels in train- und testset # x", "reduced_labelset.append(tmp_labels) return reduced_labelset, small_classes # # Erstellung des Korpus all_labels # In[7]: #", "aménagement_T\", \"Géographie rurale_T\", \"Géographie urbaine_T\", \"Migrations_T\", \"immigrations_T\", \"minorités_T\", \"Nature_T\", \"paysage et environnement_T\", \"Systèmes_T\",", "genres_T\", \"Sociologie de la consommation_T\", \"Sociologie de la culture_T\", \"Sociologie de la santé_T\",", "themes = [] disciplines = [] for i, elements in enumerate(y): tmp_all_labels =", "aus disciplines_only entfernt def delete_blog(identset, labelset, textset): idents = [] labels = []", "'&', '+', '*', '=', '#', '«', '»', '≥', '<', '>', '^']: norm_text =", ": [\"Prospectives_T\", \"XIXe siècle_T\", \"XXe siècle_T\", \"1914-1918_T\", \"1918-1939_T\", \"1939-1945_T\", \"1945-1989_T\", \"1989 à de", "for key, value in sorted(vocab.items()): v.write(\"%s : %s\\n\" % (key, value)) # In[4]:", "filename, classes, text von disciplines_only in csv-Datei with open(datasets+'/de_labeled_corpus_disciplines_only.csv', 'w', newline='', encoding=\"utf-8\") as", "schreibt filename, classes, text von reduced_labels in csv-Datei with open(datasets+'/de_labeled_corpus_reduced_labels.csv', 'w', newline='', encoding=\"utf-8\")", "# get corpus from disk identset, labelset, textset, vocab = MyCorpus(file) # save", "und testset von all_labels in csv-Dateien with open(datasets+'/all_labels_trainset.csv', 'w', newline='', encoding=\"utf-8\") as train_al_csv:", "'<', '>', '^']: norm_text = norm_text.replace(char, ' ') tokens = norm_text.split() vocab =", "return ident, label, text, vocabulary # In[3]: # get corpus from disk identset,", "\"Histoire et sociologie de la presse_T\", \"Histoire et sociologie des médias_T\", \"Histoire et", "%s\" % (key, value)) # In[23]: # schreibt filename, classes, text von themes_only", "\"Histoire et 
philosophie des sciences_D\", \"Histoire des sciences sociales_D\"], \"Langue et linguistique_D\" :", "de santé_D\", \"Sciences et pratiques des soins_D\", \"Biomédecine_D\", \"Toxicomanie_D\"], \"Sciences de l'information et", "csv.writer(test_do_csv, delimiter = \";\") test_do.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(z_test_do,", "nicht reduziert) with open(folder+'/blogposts_per_all_labels.txt',\"w\", encoding=\"utf8\") as bpl: for key, value in sorted(all_labels_dic.items()): bpl.write(\"%s", "regular expression operations import csv # module for csv output from sklearn.model_selection import", "z_test_al = train_test_split(textset, all_labels_reduced, identset, test_size = 0.20, stratify=all_labels_reduced, random_state=42) # In[11]: #", "X_train_rl, X_test_rl, y_train_rl, y_test_rl, z_train_rl, z_test_rl = train_test_split(textset, reduced_labels_reduced, identset, test_size = 0.20,", "[\"Afrique du nord_T\", \"Algérie_T\", \"Afrique noire_T\", \"Afrique australe_T\", \"Afrique centrale_T\", \"Afrique de l'Est_T\",", "list(all_labels_dic) # schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei # (all_labels", "utf-8 # # Preprocessing der Texte # # Autorin: <NAME> # In[1]: #", "{} for word in tokens: if word in vocab: vocab[word] += 1 else:", "weniger als 100 Texten:\", len(small_classes_all_labels)) print(\"Klassen insgesammt (reduziert):\", len(all_labels_reduced_dic)) # schreibt die Anzahl", "csv.writer(rl_csv, delimiter = \";\") rlw.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(identset,", "reduced_labels in csv-Datei with open(datasets+'/de_labeled_corpus_reduced_labels.csv', 'w', newline='', encoding=\"utf-8\") as rl_csv: rlw = csv.writer(rl_csv,", "In[24]: # splittet all_labels in train- und testset # x = text, y", "tmp_all_labels.append(key) tmp_themes.append(key) #print(\"\\nTheme:\", key) else: (\"Element nicht gefunden:\", element) # discipilnes for key,", "tmp_label2 = 
[x.strip()+'_t' for x in tmp_label2] tmp_label.extend(tmp_label2) #tmp_label = (tmp_label1 + tmp_label2)", "newline='', encoding=\"utf-8\") as test_to_csv: test_to = csv.writer(test_to_csv, delimiter = \";\") test_to.writerow([\"filename\", \"classes\", \"text\"])", "\"\"\" ident = [] label = [] text = [] vocabulary = {}", "['et', 'du', 'études', 'de', 'des', 'la', 'dict_keys'] for element in textliste: if element", "train_do_csv: train_do = csv.writer(train_do_csv, delimiter = \";\") train_do.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels,", "'}', '/', '\\\\', '|', '_', '-', '–', '—', '­', '„', '“', '■', '•',", "\"Méthodes qualitatives_T\", \"Méthodes quantitatives_T\", \"Sciences auxiliaires de l'Histoire_T\", \"Vie de la recherche_T\"], \"Époque", "'').strip().split(\",\") tmp_label3 = [x.strip()+'_t' for x in tmp_label3] tmp_label.extend(tmp_label3) #print(\"Sonderfall:\", tmp_ident, tmp_label) tmp_text,", "rl_csv: rlw = csv.writer(rl_csv, delimiter = \";\") rlw.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels,", "3)[2] #print(\"tmp_text:\", tmp_text) else: tmp_label1 = document.split(\";\", 2)[1].strip() #print(\"tmp_label1:\", tmp_label1) tmp_label2 = document.split(\";\",", "text, vocabulary # In[3]: # get corpus from disk identset, labelset, textset, vocab", "\"violence_T\", \"Génocides et massacres_T\", \"Histoire politique_T\", \"Institutions politiques_T\", \"Mouvements politiques et sociaux_T\", \"Politiques", "# schreibt filename, classes, text von themes_only in csv-Datei with open(datasets+'/de_labeled_corpus_themes_only.csv', 'w', newline='',", "\"classes\", \"text\"]) for ident, labels, texts in zip(z_test_rl, y_test_rl, X_test_rl): labellist = \",", "labels, z = filnames X_train_al, X_test_al, y_train_al, y_test_al, z_train_al, z_test_al = train_test_split(textset, all_labels_reduced,", "schreibt filename, classes, text von all_labels in csv-Datei with open(datasets+'/de_labeled_corpus_all_labels.csv', 'w', 
newline='', encoding=\"utf-8\")", "\"Géographie_T\" : [\"Épistémologie & histoire de la géographie_T\", \"Espace_T\", \"société et territoire_T\", \"Géographie", "\"Sociologie du travail_T\", \"Sociologie économique_T\", \"Sociologie urbaine_T\", \"Sport et loisirs_T\"]} themes_dic = {k.lower():", "# x = text, y = labels, z = filnames X_train_rl, X_test_rl, y_train_rl,", "speichert train- und testset von all_labels in csv-Dateien with open(datasets+'/reduced_labels_trainset.csv', 'w', newline='', encoding=\"utf-8\")", "y = labels, z = filnames X_train_to, X_test_to, y_train_to, y_test_to, z_train_to, z_test_to =", "for key, value in sorted(all_labels_dic.items()): bpl.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\"", "labellist = \", \".join(labels) textlist = \" \".join(texts) train_rl.writerow([ident, labellist, textlist]) with open(datasets+'/reduced_labels_testset.csv',", "do_csv: dow = csv.writer(do_csv, delimiter = \";\") dow.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels,", "[\"Balkans_T\", \"Belgique_T\", \"Europe centrale et orientale_T\", \"Mondes russes et soviétiques_T\", \"France_T\", \"Îles britanniques_T\",", "linguistique_D\"], \"Littérature_D\" : [\"Études littéraires_D\", \"Théorie et critique littéraires_D\", \"Littérature britannique_D\", \"Littérature romane_D\",", "Create names on the y-axis plt.yticks(y_pos, bars) # Show graphic plt.show() # Save", "% (key, value)) count += value print(\"Anzahl der vergebenen Labels:\", count) # In[8]:", "0.20, stratify=disciplines_only_reduced, random_state=42) # In[31]: # speichert train-, validation- und testset von disciplines_only", "\"\"\" # read stopwords words = [] stopwords = open(folder+'/german_stopwords_plain.txt', 'r').read() for token", "\"text\"]) for ident, labels, texts in zip(z_test_al, y_test_al, X_test_al): labellist = \", \".join(labels)", "row is headline for i, document in enumerate(documents[1:]): tmp_ident = document.split(\";\", 1)[0] #print(\"tmp_ident:\",", 
"Label in txt-Datei # (all_labels nicht reduziert) with open(folder+'/blogposts_per_all_labels.txt',\"w\", encoding=\"utf8\") as bpl: for", "\"Brésil_T\", \"Cône sud_T\", \"Mexique et Amérique centrale_T\", \"Pays andins_T\", \"Canada_T\", \"États-Unis_T\"], \"anthropologie_T\" :", "nace-code-list and corporate purpose and save in lists def split_csv(self, documents): \"\"\"Splittet jede", "word textliste=str(all_labels_dic.keys()) textliste=textliste.replace(',', '').replace(\"'\", \"\").replace('\"', '').replace(\"l'\", '').split(' ') blacklist = ['et', 'du', 'études',", "\"Psychanalyse_D\", \"Psychologie sociale_D\"], \"Sciences de la santé et de la santé publique_D\" :", "in zip(z_test_al, y_test_al, X_test_al): labellist = \", \".join(labels) textlist = \" \".join(texts) test_al.writerow([ident,", "\"Études culturelles_D\", \"Folklore_D\", \"Humanités pluridisciplinaires_D\", \"Musique_D\", \"Philosophie_D\", \"Religions_D\"], \"bibliothéconomie_D\" : [\"Communication_D\", \"Sciences de", "for x in tmp_label2] tmp_label.extend(tmp_label2) #tmp_label = (tmp_label1 + tmp_label2) #print(tmp_label) tmp_label =", "humanities_T\", \"Épistémologie_T\", \"Historiographie_T\", \"Méthodes de traitement et de représentation_T\", \"Méthodes qualitatives_T\", \"Méthodes quantitatives_T\",", "remove_small_classes(labelset, label_dic): \"\"\"Löscht die Klassen, denen weniger als 100 Blogbeiträge zugeordnet sind\"\"\" small_classes", "nicht gefunden:\", element) #print(\"\\ntmp_list:\", tmp_all_labels) labels.append(list(set(tmp_all_labels))) themes.append(list(set(tmp_themes))) disciplines.append(list(set(tmp_disciplines))) #print(\"\\nnew labelset:\", labels) return labels,", "labels.append(list(set(tmp_all_labels))) themes.append(list(set(tmp_themes))) disciplines.append(list(set(tmp_disciplines))) #print(\"\\nnew labelset:\", labels) return labels, themes, disciplines reduced_labels, themes_only, disciplines_only", "internationales_D\", \"Sciences politiques_D\", 
\"Administration publique_D\"], \"Sociologie et anthropologie_D\" : [\"Anthropologie_D\", \"Études régionales_D\", \"Sociologie_D\",", "labels to highest level def reduce_labels(y): \"\"\"Reduziert die Themen und Disziplinen auf die", "(key, value)) print(\"%s: %s\" % (key, value)) # In[9]: # schreibt filename, classes,", "der zugehörigen Blogbeiträge)\"\"\" labelcount_dic = {} #tmp_label = \", \" for label in", "# preprocessing #========================== # convert text to lower-case, remove punctuation and stopwords def", "# (reduced_labels nicht reduziert) with open(folder+'/blogposts_per_reduced_labels.txt',\"w\", encoding=\"utf8\") as rl: for key, value in", "test_size = 0.20, stratify=disciplines_only_reduced, random_state=42) # In[31]: # speichert train-, validation- und testset", "[\"Histoire des sciences_T\", \"Philosophie des sciences_T\", \"Sociologie des sciences_T\"], \"Études du politique_T\" :", "[\"Anthropologie culturelle_T\", \"Anthropologie politique_T\", \"Anthropologie religieuse_T\", \"Anthropologie sociale_T\"], \"Asie_T\" : [\"Asie centrale_T\", \"Asie", "vocab[word] += 1 else: vocab[word] = 1 \"\"\" # read stopwords words =", "2)[1].strip() tmp_label3 = tmp_label3.lower().replace('\"', '').strip().split(\",\") tmp_label3 = [x.strip()+'_t' for x in tmp_label3] tmp_label.extend(tmp_label3)", "X_train_al, X_test_al, y_train_al, y_test_al, z_train_al, z_test_al = train_test_split(textset, all_labels_reduced, identset, test_size = 0.20,", "textlist]) # In[19]: # splittet all_labels in train- und testset # x =", "sociale_D\"], \"Sciences de la santé et de la santé publique_D\" : [\"Éthique_D\", \"Politique", "# dementsprechend wird dieser Blog aus disciplines_only entfernt def delete_blog(identset, labelset, textset): idents", "= \", \".join(labels) textlist = \" \".join(texts) train_al.writerow([ident, labellist, textlist]) with open(datasets+'/all_labels_testset.csv', 'w',", "publique_D\" : [\"Éthique_D\", \"Politique et services de santé_D\", 
\"Sciences et pratiques des soins_D\",", "if key in vocabulary: vocabulary[key] += value else: vocabulary[key] = value return ident,", "and stopwords def normalize_text(self, text): \"\"\"Bereinigt den Text: - transformiert alles in Kleinschrift", "In[7]: # all_labels dic all_labels_dic = get_label_dic(labelset) print(\"Klassen insgesammt:\", len(all_labels_dic)) #print(all_labels_dic) count =", "Hierarchieebene reduzierte Themen:\", len(themes_only_dic)) # schreibt die Anzahl der Blogbeiträge pro Label in", "alles in Kleinschrift - löscht Satz- und Sonderzeichen\"\"\" norm_text = text.lower() # remove", "reduziert) with open(folder+'/blogposts_per_reduced_labels.txt',\"w\", encoding=\"utf8\") as rl: for key, value in sorted(reduced_labels_dic.items()): rl.write(\"%s :", "\"classes\", \"text\"]) for ident, labels, texts in zip(identset, all_labels_reduced, textset): labellist = \",", "\"Économie_T\" : [\"Développement économique_T\", \"Économie politique_T\", \"Gestion_T\", \"Travail_T\", \"emploi_T\"], \"Éducation_T\" : [\"Histoire de", "'«', '»', '≥', '<', '>', '^']: norm_text = norm_text.replace(char, ' ') tokens =", "= \", \".join(labels) textlist = \" \".join(texts) rlw.writerow([ident, labellist, textlist]) # In[19]: #", "et environnement_T\", \"Systèmes_T\", \"modélisation_T\", \"géostatistiques_T\"], \"Histoire_T\" : [\"Histoire des femmes_T\", \"Histoire du travail_T\",", "len(disciplines_only_reduced_dic)) # schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei # (disciplines_only", "in train- und testset # x = text, y = labels, z =", "\"\"\" # file: input data # def __init__(self, file, x, y): def __init__(self,", "label: if l in labelcount_dic: labelcount_dic[l] += 1 else: labelcount_dic[l] = 1 return", "x, y): def __init__(self, file): self.file = file # memory friendlys because doesn't", "texts in zip(z_train_al, y_train_al, X_train_al): labellist = \", \".join(labels) textlist = \" \".join(texts)", "with 
open(datasets+'/de_labeled_corpus_disciplines_only.csv', 'w', newline='', encoding=\"utf-8\") as do_csv: dow = csv.writer(do_csv, delimiter = \";\")", "#tmp_label = \", \" for label in y: for l in label: if", "disciplines_dic = {k.lower(): [i.lower() for i in v] for k, v in disciplines_dic.items()}", "# Show graphic plt.show() # Save as SVG: plt.savefig(pictures+'/Blogs_all_labels_histogram.svg', format='svg') plt.savefig(pictures+'/Blogs_all_labels_histogram.png') # In[33]:", "wordcloud object wordcloud = WordCloud(width=680, height=680, margin=0, background_color=\"white\").generate(text) # Display the generated image:", "\"‚\",\"'\", '(', ')', '[', ']', '{', '}', '/', '\\\\', '|', '_', '-', '–',", "\"Amériques_T\" : [\"Amérique latine_T\", \"Brésil_T\", \"Cône sud_T\", \"Mexique et Amérique centrale_T\", \"Pays andins_T\",", "encoding=\"utf-8\") as to_csv: tow = csv.writer(to_csv, delimiter = \";\") tow.writerow([\"filename\", \"classes\", \"text\"]) for", "de la santé publique_D\" : [\"Éthique_D\", \"Politique et services de santé_D\", \"Sciences et", "In[12]: # themes themes_dic = {\"Afrique_T\" : [\"Afrique du nord_T\", \"Algérie_T\", \"Afrique noire_T\",", "normalize_text(self, text): \"\"\"Bereinigt den Text: - transformiert alles in Kleinschrift - löscht Satz-", "labels, texts]) # In[24]: # splittet all_labels in train- und testset # x", "(<100) reduced_labels_reduced, small_classes_reduced_labels = remove_small_classes(reduced_labels, reduced_labels_dic) reduced_labels_reduced_dic = get_label_dic(reduced_labels_reduced) print(\"Klassen mit weniger als", "= 0.20, stratify=all_labels_reduced, random_state=42) # In[11]: # speichert train- und testset von all_labels", "% (key, value)) # In[17]: # reduced_labels reduced dic (<100) reduced_labels_reduced, small_classes_reduced_labels =", "# speichert train-, validation- und testset von disciplines_only in csv-Dateien with open(datasets+'/disciplines_only_trainset.csv', 'w',", "= \";\") 
tow.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(identset, themes_only, textset):", "dic reduced (<100) disciplines_only_reduced, small_classes_disciplines_only = remove_small_classes(disciplines_only, disciplines_only_dic) disciplines_only_reduced_dic = get_label_dic(disciplines_only_reduced) print(\"Klassen mit", "words, vocab #return norm_text, vocab return tokens, vocab # split identnumber, nace-code-list and", ": [\"Amérique latine_T\", \"Brésil_T\", \"Cône sud_T\", \"Mexique et Amérique centrale_T\", \"Pays andins_T\", \"Canada_T\",", "disciplines_dic.items(): print(\"%s: %s\" % (key, value)) print(len(list(disciplines_dic))) # In[14]: # reduce labels to", "nos jours_T\", \"XXIe siècle_T\"], \"Époque moderne_T\" : [\"Révolution française_T\", \"XVIe siècle_T\", \"XVIIe siècle_T\",", "der Blogbeiträge pro Label in txt-Datei # (disciplines_only reduziert auf Labels mit mehr", "'3', '4', '5', '6', '7', '8', '9', '.', ',', ';', ':', '!', '?',", "et représentation_T\", \"Géographie appliquée et aménagement_T\", \"Géographie rurale_T\", \"Géographie urbaine_T\", \"Migrations_T\", \"immigrations_T\", \"minorités_T\",", "small_classes # # Erstellung des Korpus all_labels # In[7]: # all_labels dic all_labels_dic", "in vocab: vocab[word] += 1 else: vocab[word] = 1 \"\"\" # read stopwords", "archéologie_D\" : [\"Archéologie_D\", \"Histoire_D\", \"Histoire et philosophie des sciences_D\", \"Histoire des sciences sociales_D\"],", "dow.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(identset, disciplines_only, textset): dow.writerow([ident, labels,", "openfile.readlines() openfile.close() texts = self.split_csv(documents) for text in texts: #print(\"\\n text in iter:\",", "'w', newline='', encoding=\"utf-8\") as train_al_csv: train_al = csv.writer(train_al_csv, delimiter = \";\") train_al.writerow([\"filename\", \"classes\",", "tmp_label1 = document.split(\";\", 2)[1].strip() #print(\"tmp_label1:\", 
tmp_label1) tmp_label2 = document.split(\";\", 3)[2].strip() #print(\"tmp_label2:\", tmp_label2) tmp_text,", "\"Afrique noire_T\", \"Afrique australe_T\", \"Afrique centrale_T\", \"Afrique de l'Est_T\", \"Afrique de l'Ouest_T\"], \"Amériques_T\"", "\"Histoire_T\" : [\"Histoire des femmes_T\", \"Histoire du travail_T\", \"Histoire économique_T\", \"Histoire industrielle_T\", \"Histoire", "elif element in value: tmp_all_labels.append(key) tmp_disciplines.append(key) #print(\"\\nDiscipline:\", key) else: (\"Element nicht gefunden:\", element)", "label in y: for l in label: if l in labelcount_dic: labelcount_dic[l] +=", "y_train_to, X_train_to): labellist = \", \".join(labels) textlist = \" \".join(texts) train_to.writerow([ident, labellist, textlist])", "labels, texts in zip(z_train_al, y_train_al, X_train_al): labellist = \", \".join(labels) textlist = \"", "[] if re.match(\"aes_\", tmp_ident): # Blog \"aes - <NAME>\" hat nur Thèmes: Histoire,", "\"Monde oriental_T\", \"Préhistoire_T\"], \"Psychisme_T\" : [\"Psychanalyse_T\", \"Psychologie_T\"], \"Religions_T\" : [\"Histoire des religions_T\", \"Sociologie", "nicht gefunden:\", element) # discipilnes for key, value in disciplines_dic.items(): if element ==", "in zip(z_train_rl, y_train_rl, X_train_rl): labellist = \", \".join(labels) textlist = \" \".join(texts) train_rl.writerow([ident,", "open(datasets+'/de_labeled_corpus_themes_only.csv', 'w', newline='', encoding=\"utf-8\") as to_csv: tow = csv.writer(to_csv, delimiter = \";\") tow.writerow([\"filename\",", "histoire de la géographie_T\", \"Espace_T\", \"société et territoire_T\", \"Géographie : politique_T\", \"culture et", "in labelcount_dic: labelcount_dic[l] += 1 else: labelcount_dic[l] = 1 return labelcount_dic # In[6]:", "x = text, y = labels, z = filnames X_train_rl, X_test_rl, y_train_rl, y_test_rl,", "l in labelcount_dic: labelcount_dic[l] += 1 else: labelcount_dic[l] = 1 return labelcount_dic #", "blacklist: textliste.remove(element) text = 
str(textliste).replace(\"'\", \"\") # Create the wordcloud object wordcloud =", "return tokens, vocab # split identnumber, nace-code-list and corporate purpose and save in", "[\"Prospectives_T\", \"XIXe siècle_T\", \"XXe siècle_T\", \"1914-1918_T\", \"1918-1939_T\", \"1939-1945_T\", \"1945-1989_T\", \"1989 à de nos", "[\"Études littéraires_D\", \"Théorie et critique littéraires_D\", \"Littérature britannique_D\", \"Littérature romane_D\", \"Littérature_D\"], \"Management et", "= self.normalize_text(document.split(\";\", 3)[2]) #tmp_text = document.split(\";\", 3)[2] #print(\"tmp_text:\", tmp_text) else: tmp_label1 = document.split(\";\",", "testset # x = text, y = labels, z = filnames X_train_to, X_test_to,", "in value: tmp_all_labels.append(key) tmp_disciplines.append(key) #print(\"\\nDiscipline:\", key) else: (\"Element nicht gefunden:\", element) #print(\"\\ntmp_list:\", tmp_all_labels)", "from disk identset, labelset, textset, vocab = MyCorpus(file) # save vocabulary to file", "textlist = \" \".join(texts) train_rl.writerow([ident, labellist, textlist]) with open(datasets+'/reduced_labels_testset.csv', 'w', newline='', encoding=\"utf-8\") as", "# In[2]: # Class for accessing and preprocessing the data folder = '../Preprocessing'", "') blacklist = ['et', 'du', 'études', 'de', 'des', 'la', 'dict_keys'] for element in", "reduziert) with open(folder+'/blogposts_per_all_labels.txt',\"w\", encoding=\"utf8\") as bpl: for key, value in sorted(all_labels_dic.items()): bpl.write(\"%s :", "et pratiques des soins_D\", \"Biomédecine_D\", \"Toxicomanie_D\"], \"Sciences de l'information et de la communication_D\"", "def get_label_dic(y): \"\"\"Erstellt ein dictionary zur Anzahl der Blogbeiträge pro Label (Label :", "die Anzahl der Blogbeiträge pro Label in txt-Datei # (themes_only reduziert auf Labels", "if element == key: tmp_all_labels.append(element) tmp_disciplines.append(element) #print(\"\\nDiscipline key:\", element) elif element in value:", "value: 
tmp_all_labels.append(key) tmp_disciplines.append(key) #print(\"\\nDiscipline:\", key) else: (\"Element nicht gefunden:\", element) #print(\"\\ntmp_list:\", tmp_all_labels) labels.append(list(set(tmp_all_labels)))", "reduziert auf Labels mit mehr als 100 Blogbeiträgen) with open(folder+'/blogposts_per_all_labels_reduced.txt',\"w\", encoding=\"utf8\") as bplr:", "russes et soviétiques_T\", \"France_T\", \"Îles britanniques_T\", \"Italie_T\", \"Méditerranée_T\", \"Monde germanique_T\", \"Pays baltes et", "delimiter = \";\") dow.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts in zip(identset, disciplines_only,", "scandinaves_T\", \"Péninsule ibérique_T\", \"Suisse_T\"], \"Géographie_T\" : [\"Épistémologie & histoire de la géographie_T\", \"Espace_T\",", "for key, value in sorted(disciplines_only_reduced_dic.items()): dor.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\"", "und testset # x = text, y = labels, z = filnames X_train_to,", "[\"Commerce et affaires_D\", \"Économie_D\", \"Finance_D\"], \"Éducation_D\" : [\"Éducation et sciences de l'éducation_D\", \"Éducation", "as rlr: for key, value in sorted(reduced_labels_reduced_dic.items()): rlr.write(\"%s : %s\\n\" % (key, value))", "\"Sciences cognitives_T\"], \"Préhistoire et antiquité_T\" : [\"Égypte ancienne_T\", \"Histoire grecque_T\", \"Histoire romaine_T\", \"Monde", "consommation_T\", \"Sociologie de la culture_T\", \"Sociologie de la santé_T\", \"Sociologie du travail_T\", \"Sociologie", "print(len(list(disciplines_dic))) # In[14]: # reduce labels to highest level def reduce_labels(y): \"\"\"Reduziert die", "politique_T\", \"Gestion_T\", \"Travail_T\", \"emploi_T\"], \"Éducation_T\" : [\"Histoire de l'éducation_T\", \"Sciences de l'éducation_T\"], \"Épistémologie", "disciplines_only_dic = get_label_dic(disciplines_only) print(\"Auf höchste Hierarchieebene reduzierte Klassen insgesammt:\", len(disciplines_only_dic)) # schreibt die", "v in disciplines_dic.items()} 
print(\"DISCIPLINES:\") for key, value in disciplines_dic.items(): print(\"%s: %s\" % (key,", "labellist, textlist]) # # Visualisierungen # In[ ]: pictures = '../Visualisierungen' if not", "'dict_keys'] for element in textliste: if element in blacklist: textliste.remove(element) text = str(textliste).replace(\"'\",", "centrale et orientale_T\", \"Mondes russes et soviétiques_T\", \"France_T\", \"Îles britanniques_T\", \"Italie_T\", \"Méditerranée_T\", \"Monde", "labels, z = filnames X_train_to, X_test_to, y_train_to, y_test_to, z_train_to, z_test_to = train_test_split(textset, themes_only_reduced,", "train_al = csv.writer(train_al_csv, delimiter = \";\") train_al.writerow([\"filename\", \"classes\", \"text\"]) for ident, labels, texts", "Hierarchiestufe\"\"\" labels = [] # new y themes = [] disciplines = []", "in zip(identset, reduced_labels_reduced, textset): labellist = \", \".join(labels) textlist = \" \".join(texts) rlw.writerow([ident,", "\" \".join(texts) train_to.writerow([ident, labellist, textlist]) with open(datasets+'/themes_only_testset.csv', 'w', newline='', encoding=\"utf-8\") as test_to_csv: test_to", "# Create horizontal bars plt.barh(y_pos, height) # Create names on the y-axis plt.yticks(y_pos,", "openfile.close() texts = self.split_csv(documents) for text in texts: #print(\"\\n text in iter:\", text)", "'6', '7', '8', '9', '.', ',', ';', ':', '!', '?', '…','·', '·', '\"',", "famille_D\", \"Questions sociales_D\", \"Travail social_D\"]} disciplines_dic = {k.lower(): [i.lower() for i in v]", "\"Études féministes_D\"], \"Travail social et politique sociale_D\" : [\"Études des relations interethniques_D\", \"Études", "'w', newline='', encoding=\"utf-8\") as train_to_csv: train_to = csv.writer(train_to_csv, delimiter = \";\") train_to.writerow([\"filename\", \"classes\",", "in sorted(themes_only_reduced_dic.items()): tor.write(\"%s : %s\\n\" % (key, value)) print(\"%s: %s\" % (key, value))", "# Erstellung des Korpus themes_only # In[21]: # themes_only 
dic themes_only_dic = get_label_dic(themes_only)", "random_state=42) # In[25]: # speichert train-, validation- und testset von themes_only in csv-Dateien", "# In[7]: # all_labels dic all_labels_dic = get_label_dic(labelset) print(\"Klassen insgesammt:\", len(all_labels_dic)) #print(all_labels_dic) count", "100 Blogbeiträge zugeordnet sind\"\"\" small_classes = [] reduced_labelset = [] for key, value", "\"Économie politique_T\", \"Gestion_T\", \"Travail_T\", \"emploi_T\"], \"Éducation_T\" : [\"Histoire de l'éducation_T\", \"Sciences de l'éducation_T\"],", "de l'information et bibliothéconomie_D\"], \"Droit_D\" : [\"Criminologie_D\", \"Droit_D\"], \"Économie_D\" : [\"Commerce et affaires_D\",", "(reduced_labels nicht reduziert) with open(folder+'/blogposts_per_reduced_labels.txt',\"w\", encoding=\"utf8\") as rl: for key, value in sorted(reduced_labels_dic.items()):", "In[33]: # Visualisierung des all_label_dics in einer Wortwolke # Create a list of", "# file: input data # def __init__(self, file, x, y): def __init__(self, file):", "traitement et de représentation_T\", \"Méthodes qualitatives_T\", \"Méthodes quantitatives_T\", \"Sciences auxiliaires de l'Histoire_T\", \"Vie", "relations interethniques_D\", \"Études sur la famille_D\", \"Questions sociales_D\", \"Travail social_D\"]} disciplines_dic = {k.lower():", "'des', 'la', 'dict_keys'] for element in textliste: if element in blacklist: textliste.remove(element) text", "tor: for key, value in sorted(themes_only_reduced_dic.items()): tor.write(\"%s : %s\\n\" % (key, value)) print(\"%s:", "\"Histoire et sociologie du livre_T\", \"Sciences de l'information_T\"], \"Langage_T\" : [\"Linguistique_T\", \"Littératures_T\"], \"Moyen", "encoding=\"utf-8\") as train_al_csv: train_al = csv.writer(train_al_csv, delimiter = \";\") train_al.writerow([\"filename\", \"classes\", \"text\"]) for", "\", \".join(labels) textlist = \" \".join(texts) rlw.writerow([ident, labellist, textlist]) # In[19]: # splittet", "#print(\"\\nlabels 
in y an der Stelle %s: %s\" % (i, elements)) for element", ": politique_T\", \"culture et représentation_T\", \"Géographie appliquée et aménagement_T\", \"Géographie rurale_T\", \"Géographie urbaine_T\",", "len(reduced_labels_reduced_dic)) # schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei # (reduced_labels", "schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei # (themes_only reduziert auf", "= get_label_dic(themes_only_reduced) print(\"Klassen mit weniger als 100 Texten:\", len(small_classes_themes_only)) print(\"Auf höchste Hierarchieebene reduzierte", "len(all_labels_dic)) #print(all_labels_dic) count = 0 classes = list(all_labels_dic) # schreibt die Anzahl der", "\"imagerie_T\", \"SIG_T\", \"Digital humanities_T\", \"Épistémologie_T\", \"Historiographie_T\", \"Méthodes de traitement et de représentation_T\", \"Méthodes", "open(folder+'/blogs_vocabulary.txt',\"w\", encoding=\"utf8\") as v: for key, value in sorted(vocab.items()): v.write(\"%s : %s\\n\" %", "\"Préhistoire et antiquité_T\" : [\"Égypte ancienne_T\", \"Histoire grecque_T\", \"Histoire romaine_T\", \"Monde oriental_T\", \"Préhistoire_T\"],", "Histogramm: Blogs pro all_labels (besser in excel visualisieren) height = list(all_labels_dic.values()) bars =", "disciplines_dic.items()} print(\"DISCIPLINES:\") for key, value in disciplines_dic.items(): print(\"%s: %s\" % (key, value)) print(len(list(disciplines_dic)))", "# save vocabulary to file with open(folder+'/blogs_vocabulary.txt',\"w\", encoding=\"utf8\") as v: for key, value", "train- und testset # x = text, y = labels, z = filnames", "texts in zip(z_train_do, y_train_do, X_train_do): labellist = \", \".join(labels) textlist = \" \".join(texts)", "indien_T\", \"Monde persan_T\", \"Moyen-Orient_T\", \"Proche-Orient_T\"], \"Droit_T\" : [\"Histoire du droit_T\", \"Sociologie du droit_T\"],", "schreibt die Anzahl der Blogbeiträge pro Label in txt-Datei # (all_labels reduziert auf", "print(themes_only[1000]) 
print(disciplines_only[1000]) # In[16]: # reduced_labels dic reduced_labels_dic = get_label_dic(reduced_labels) print(\"Auf höchste Hierarchieebene", "= labels, z = filnames X_train_rl, X_test_rl, y_train_rl, y_test_rl, z_train_rl, z_test_rl = train_test_split(textset,", "= \" \".join(texts) rlw.writerow([ident, labellist, textlist]) # In[19]: # splittet all_labels in train-" ]
[ "as wfile: for line in rfile: if line == '\\n': continue if '['", "line in rfile: if line == '\\n': continue if '[' not in line:", "with open('graduation_raw.txt') as rfile: with open('graduation.txt','w') as wfile: for line in rfile: if", "== '\\n': continue if '[' not in line: wfile.write(line) elif ']' in line:", "in rfile: if line == '\\n': continue if '[' not in line: wfile.write(line)", "if line == '\\n': continue if '[' not in line: wfile.write(line) elif ']'", "wfile: for line in rfile: if line == '\\n': continue if '[' not", "for line in rfile: if line == '\\n': continue if '[' not in", "as rfile: with open('graduation.txt','w') as wfile: for line in rfile: if line ==", "rfile: with open('graduation.txt','w') as wfile: for line in rfile: if line == '\\n':", "open('graduation_raw.txt') as rfile: with open('graduation.txt','w') as wfile: for line in rfile: if line", "<reponame>RDShah/text-analyzer with open('graduation_raw.txt') as rfile: with open('graduation.txt','w') as wfile: for line in rfile:", "open('graduation.txt','w') as wfile: for line in rfile: if line == '\\n': continue if", "if '[' not in line: wfile.write(line) elif ']' in line: wfile.write(line[:line.index('[')]) wfile.write(line[line.index(']')+1:]) wfile.close()", "'\\n': continue if '[' not in line: wfile.write(line) elif ']' in line: wfile.write(line[:line.index('[')])", "continue if '[' not in line: wfile.write(line) elif ']' in line: wfile.write(line[:line.index('[')]) wfile.write(line[line.index(']')+1:])", "line == '\\n': continue if '[' not in line: wfile.write(line) elif ']' in", "rfile: if line == '\\n': continue if '[' not in line: wfile.write(line) elif", "with open('graduation.txt','w') as wfile: for line in rfile: if line == '\\n': continue" ]
[ "'code failed!!') print(warning, 'maybe something wrong.') print(deprecated, 'this function is deprecated.') print(Iro([ Color.RED,", "bg=True), ' '], disable_rgb=False), end='') ``` **output** ![output](https://github.com/nagataaaas/Iro/blob/main/assets/capture1.png?raw=true) # Installation $ pip install", "IRO === Easy and powerful Colorizer for Python! Powered by [<NAME>](https://twitter.com/514YJ) [GitHub](https://github.com/nagataaaas/Iro) ```python", "powerful Colorizer for Python! Powered by [<NAME>](https://twitter.com/514YJ) [GitHub](https://github.com/nagataaaas/Iro) ```python from iro import Iro,", "\"[DEPRECATED]\")) print(success, 'code success.') print(error, 'code failed!!') print(warning, 'maybe something wrong.') print(deprecated, 'this", "<reponame>nagataaaas/Iro<gh_stars>1-10 \"\"\" IRO === Easy and powerful Colorizer for Python! Powered by [<NAME>](https://twitter.com/514YJ)", "Python\", \"Topic :: Software Development :: Libraries :: Python Modules\", \"Environment :: Console\"", "print(Iro([ Color.RED, \"Off course, You can nest styles. \", [ Style.ITALIC, \"This is", "to only RED!\" ])) for h in range(256): print(Iro([ColorRGB(*map(lambda x: x * 255,", "Style.DOUBLY_UNDERLINE, ColorRGB(255, 0, 0, bg=True), \"[ ERROR ]\"), disable_rgb=False) warning = Iro((Color.YELLOW, Color256(255,", "**output** ![output](https://github.com/nagataaaas/Iro/blob/main/assets/capture1.png?raw=true) # Installation $ pip install iro \"\"\" from setuptools import setup", "os import path about = {} with open(\"iro/__about__.py\") as f: exec(f.read(), about) here", ":: Python :: 3\", \"Programming Language :: Python\", \"Topic :: Software Development ::", "3\", \"Programming Language :: Python\", \"Topic :: Software Development :: Libraries :: Python", "can nest styles. \", [ Style.ITALIC, \"This is RED and ITALIC. 
\" ],", "Finally back to only RED!\" ])) for h in range(256): print(Iro([ColorRGB(*map(lambda x: x", "\"This is BLUE, BG_YELLOW and UNDERLINED.\" ], \" Finally back to only RED!\"", "\" Finally back to only RED!\" ])) for h in range(256): print(Iro([ColorRGB(*map(lambda x:", "setup(name=about[\"__title__\"], version=about[\"__version__\"], url=about[\"__url__\"], license=about[\"__license__\"], author=about[\"__author__\"], author_email=about[\"__author_email__\"], description=about[\"__description__\"], long_description=__doc__, long_description_content_type=\"text/markdown\", install_requires=[], packages=[\"iro\"], zip_safe=True, platforms=\"any\",", "You can nest styles. \", [ Style.ITALIC, \"This is RED and ITALIC. \"", "$ pip install iro \"\"\" from setuptools import setup from os import path", "RED and ITALIC. \" ], [ Color.BLUE, Color.BG_BRIGHT_YELLOW, Style.UNDERLINE, \"This is BLUE, BG_YELLOW", "range(256): print(Iro([ColorRGB(*map(lambda x: x * 255, hls_to_rgb(h / 256, 0.7, 1)), bg=True), '", "OSI Approved :: MIT License\", \"Operating System :: OS Independent\", \"Programming Language ::", "'code success.') print(error, 'code failed!!') print(warning, 'maybe something wrong.') print(deprecated, 'this function is", "\"Operating System :: OS Independent\", \"Programming Language :: Python :: 3\", \"Programming Language", "warning = Iro((Color.YELLOW, Color256(255, bg=True), \"[ WARNING ]\")) deprecated = Iro((Color256(7), Color256(239, True),", "install iro \"\"\" from setuptools import setup from os import path about =", "here = path.abspath(path.dirname(__file__)) setup(name=about[\"__title__\"], version=about[\"__version__\"], url=about[\"__url__\"], license=about[\"__license__\"], author=about[\"__author__\"], author_email=about[\"__author_email__\"], description=about[\"__description__\"], long_description=__doc__, long_description_content_type=\"text/markdown\", install_requires=[],", "Other Environment\", \"Intended Audience :: Developers\", 
\"License :: OSI Approved :: MIT License\",", "= path.abspath(path.dirname(__file__)) setup(name=about[\"__title__\"], version=about[\"__version__\"], url=about[\"__url__\"], license=about[\"__license__\"], author=about[\"__author__\"], author_email=about[\"__author_email__\"], description=about[\"__description__\"], long_description=__doc__, long_description_content_type=\"text/markdown\", install_requires=[], packages=[\"iro\"],", "open(\"iro/__about__.py\") as f: exec(f.read(), about) here = path.abspath(path.dirname(__file__)) setup(name=about[\"__title__\"], version=about[\"__version__\"], url=about[\"__url__\"], license=about[\"__license__\"], author=about[\"__author__\"],", "is deprecated.') print(Iro([ Color.RED, \"Off course, You can nest styles. \", [ Style.ITALIC,", "nest styles. \", [ Style.ITALIC, \"This is RED and ITALIC. \" ], [", "Style.UNDERLINE, \"This is BLUE, BG_YELLOW and UNDERLINED.\" ], \" Finally back to only", "deprecated = Iro((Color256(7), Color256(239, True), Style.STRIKE, \"[DEPRECATED]\")) print(success, 'code success.') print(error, 'code failed!!')", "'this function is deprecated.') print(Iro([ Color.RED, \"Off course, You can nest styles. \",", "[ Style.ITALIC, \"This is RED and ITALIC. \" ], [ Color.BLUE, Color.BG_BRIGHT_YELLOW, Style.UNDERLINE,", "description=about[\"__description__\"], long_description=__doc__, long_description_content_type=\"text/markdown\", install_requires=[], packages=[\"iro\"], zip_safe=True, platforms=\"any\", classifiers=[ \"Development Status :: 4 -", "exec(f.read(), about) here = path.abspath(path.dirname(__file__)) setup(name=about[\"__title__\"], version=about[\"__version__\"], url=about[\"__url__\"], license=about[\"__license__\"], author=about[\"__author__\"], author_email=about[\"__author_email__\"], description=about[\"__description__\"], long_description=__doc__,", "])) for h in range(256): print(Iro([ColorRGB(*map(lambda x: x * 255, hls_to_rgb(h / 256,", "styles. 
\", [ Style.ITALIC, \"This is RED and ITALIC. \" ], [ Color.BLUE,", "by [<NAME>](https://twitter.com/514YJ) [GitHub](https://github.com/nagataaaas/Iro) ```python from iro import Iro, Color, Style, ColorRGB, Color256 from", ":: Python\", \"Topic :: Software Development :: Libraries :: Python Modules\", \"Environment ::", "]\")) deprecated = Iro((Color256(7), Color256(239, True), Style.STRIKE, \"[DEPRECATED]\")) print(success, 'code success.') print(error, 'code", "wrong.') print(deprecated, 'this function is deprecated.') print(Iro([ Color.RED, \"Off course, You can nest", "UNDERLINED.\" ], \" Finally back to only RED!\" ])) for h in range(256):", "and ITALIC. \" ], [ Color.BLUE, Color.BG_BRIGHT_YELLOW, Style.UNDERLINE, \"This is BLUE, BG_YELLOW and", "Python :: 3\", \"Programming Language :: Python\", \"Topic :: Software Development :: Libraries", "1)), bg=True), ' '], disable_rgb=False), end='') ``` **output** ![output](https://github.com/nagataaaas/Iro/blob/main/assets/capture1.png?raw=true) # Installation $ pip", "ColorRGB, Color256 from colorsys import hls_to_rgb success = Iro((Color.GREEN, \"[ SUCCESS ]\")) error", "function is deprecated.') print(Iro([ Color.RED, \"Off course, You can nest styles. 
\", [", "], \" Finally back to only RED!\" ])) for h in range(256): print(Iro([ColorRGB(*map(lambda", "f: exec(f.read(), about) here = path.abspath(path.dirname(__file__)) setup(name=about[\"__title__\"], version=about[\"__version__\"], url=about[\"__url__\"], license=about[\"__license__\"], author=about[\"__author__\"], author_email=about[\"__author_email__\"], description=about[\"__description__\"],", ":: OSI Approved :: MIT License\", \"Operating System :: OS Independent\", \"Programming Language", "\" ], [ Color.BLUE, Color.BG_BRIGHT_YELLOW, Style.UNDERLINE, \"This is BLUE, BG_YELLOW and UNDERLINED.\" ],", "0.7, 1)), bg=True), ' '], disable_rgb=False), end='') ``` **output** ![output](https://github.com/nagataaaas/Iro/blob/main/assets/capture1.png?raw=true) # Installation $", "[<NAME>](https://twitter.com/514YJ) [GitHub](https://github.com/nagataaaas/Iro) ```python from iro import Iro, Color, Style, ColorRGB, Color256 from colorsys", "back to only RED!\" ])) for h in range(256): print(Iro([ColorRGB(*map(lambda x: x *", "author=about[\"__author__\"], author_email=about[\"__author_email__\"], description=about[\"__description__\"], long_description=__doc__, long_description_content_type=\"text/markdown\", install_requires=[], packages=[\"iro\"], zip_safe=True, platforms=\"any\", classifiers=[ \"Development Status ::", "Approved :: MIT License\", \"Operating System :: OS Independent\", \"Programming Language :: Python", "Python! Powered by [<NAME>](https://twitter.com/514YJ) [GitHub](https://github.com/nagataaaas/Iro) ```python from iro import Iro, Color, Style, ColorRGB,", "disable_rgb=False) warning = Iro((Color.YELLOW, Color256(255, bg=True), \"[ WARNING ]\")) deprecated = Iro((Color256(7), Color256(239,", "=== Easy and powerful Colorizer for Python! 
Powered by [<NAME>](https://twitter.com/514YJ) [GitHub](https://github.com/nagataaaas/Iro) ```python from", "and UNDERLINED.\" ], \" Finally back to only RED!\" ])) for h in", "Iro((Color256(7), Color256(239, True), Style.STRIKE, \"[DEPRECATED]\")) print(success, 'code success.') print(error, 'code failed!!') print(warning, 'maybe", "System :: OS Independent\", \"Programming Language :: Python :: 3\", \"Programming Language ::", "RED!\" ])) for h in range(256): print(Iro([ColorRGB(*map(lambda x: x * 255, hls_to_rgb(h /", "pip install iro \"\"\" from setuptools import setup from os import path about", "Language :: Python\", \"Topic :: Software Development :: Libraries :: Python Modules\", \"Environment", "```python from iro import Iro, Color, Style, ColorRGB, Color256 from colorsys import hls_to_rgb", "Color.RED, \"Off course, You can nest styles. \", [ Style.ITALIC, \"This is RED", "\"Environment :: Other Environment\", \"Intended Audience :: Developers\", \"License :: OSI Approved ::", ":: MIT License\", \"Operating System :: OS Independent\", \"Programming Language :: Python ::", "license=about[\"__license__\"], author=about[\"__author__\"], author_email=about[\"__author_email__\"], description=about[\"__description__\"], long_description=__doc__, long_description_content_type=\"text/markdown\", install_requires=[], packages=[\"iro\"], zip_safe=True, platforms=\"any\", classifiers=[ \"Development Status", "``` **output** ![output](https://github.com/nagataaaas/Iro/blob/main/assets/capture1.png?raw=true) # Installation $ pip install iro \"\"\" from setuptools import", "Iro((Color.GREEN, \"[ SUCCESS ]\")) error = Iro((Color.WHITE, Style.DOUBLY_UNDERLINE, ColorRGB(255, 0, 0, bg=True), \"[", "\"Programming Language :: Python :: 3\", \"Programming Language :: Python\", \"Topic :: Software", "Colorizer for Python! 
Powered by [<NAME>](https://twitter.com/514YJ) [GitHub](https://github.com/nagataaaas/Iro) ```python from iro import Iro, Color,", "is BLUE, BG_YELLOW and UNDERLINED.\" ], \" Finally back to only RED!\" ]))", "colorsys import hls_to_rgb success = Iro((Color.GREEN, \"[ SUCCESS ]\")) error = Iro((Color.WHITE, Style.DOUBLY_UNDERLINE,", "'], disable_rgb=False), end='') ``` **output** ![output](https://github.com/nagataaaas/Iro/blob/main/assets/capture1.png?raw=true) # Installation $ pip install iro \"\"\"", "Style, ColorRGB, Color256 from colorsys import hls_to_rgb success = Iro((Color.GREEN, \"[ SUCCESS ]\"))", "\"Topic :: Software Development :: Libraries :: Python Modules\", \"Environment :: Console\" ])", "\", [ Style.ITALIC, \"This is RED and ITALIC. \" ], [ Color.BLUE, Color.BG_BRIGHT_YELLOW,", "bg=True), \"[ ERROR ]\"), disable_rgb=False) warning = Iro((Color.YELLOW, Color256(255, bg=True), \"[ WARNING ]\"))", "Style.ITALIC, \"This is RED and ITALIC. \" ], [ Color.BLUE, Color.BG_BRIGHT_YELLOW, Style.UNDERLINE, \"This", "disable_rgb=False), end='') ``` **output** ![output](https://github.com/nagataaaas/Iro/blob/main/assets/capture1.png?raw=true) # Installation $ pip install iro \"\"\" from", "success = Iro((Color.GREEN, \"[ SUCCESS ]\")) error = Iro((Color.WHITE, Style.DOUBLY_UNDERLINE, ColorRGB(255, 0, 0,", "import Iro, Color, Style, ColorRGB, Color256 from colorsys import hls_to_rgb success = Iro((Color.GREEN,", "error = Iro((Color.WHITE, Style.DOUBLY_UNDERLINE, ColorRGB(255, 0, 0, bg=True), \"[ ERROR ]\"), disable_rgb=False) warning", "\"\"\" from setuptools import setup from os import path about = {} with", "print(warning, 'maybe something wrong.') print(deprecated, 'this function is deprecated.') print(Iro([ Color.RED, \"Off course,", "Iro((Color.YELLOW, Color256(255, bg=True), \"[ WARNING ]\")) deprecated = Iro((Color256(7), Color256(239, True), Style.STRIKE, \"[DEPRECATED]\"))", ":: OS Independent\", \"Programming Language :: Python :: 3\", \"Programming 
Language :: Python\",", "long_description_content_type=\"text/markdown\", install_requires=[], packages=[\"iro\"], zip_safe=True, platforms=\"any\", classifiers=[ \"Development Status :: 4 - Beta\", \"Environment", "Status :: 4 - Beta\", \"Environment :: Other Environment\", \"Intended Audience :: Developers\",", "Audience :: Developers\", \"License :: OSI Approved :: MIT License\", \"Operating System ::", "import hls_to_rgb success = Iro((Color.GREEN, \"[ SUCCESS ]\")) error = Iro((Color.WHITE, Style.DOUBLY_UNDERLINE, ColorRGB(255,", "\"[ SUCCESS ]\")) error = Iro((Color.WHITE, Style.DOUBLY_UNDERLINE, ColorRGB(255, 0, 0, bg=True), \"[ ERROR", "from iro import Iro, Color, Style, ColorRGB, Color256 from colorsys import hls_to_rgb success", "ERROR ]\"), disable_rgb=False) warning = Iro((Color.YELLOW, Color256(255, bg=True), \"[ WARNING ]\")) deprecated =", "# Installation $ pip install iro \"\"\" from setuptools import setup from os", "'maybe something wrong.') print(deprecated, 'this function is deprecated.') print(Iro([ Color.RED, \"Off course, You", "for Python! 
Powered by [<NAME>](https://twitter.com/514YJ) [GitHub](https://github.com/nagataaaas/Iro) ```python from iro import Iro, Color, Style,", "setup from os import path about = {} with open(\"iro/__about__.py\") as f: exec(f.read(),", "BLUE, BG_YELLOW and UNDERLINED.\" ], \" Finally back to only RED!\" ])) for", ":: Developers\", \"License :: OSI Approved :: MIT License\", \"Operating System :: OS", "import path about = {} with open(\"iro/__about__.py\") as f: exec(f.read(), about) here =", "0, 0, bg=True), \"[ ERROR ]\"), disable_rgb=False) warning = Iro((Color.YELLOW, Color256(255, bg=True), \"[", "![output](https://github.com/nagataaaas/Iro/blob/main/assets/capture1.png?raw=true) # Installation $ pip install iro \"\"\" from setuptools import setup from", "packages=[\"iro\"], zip_safe=True, platforms=\"any\", classifiers=[ \"Development Status :: 4 - Beta\", \"Environment :: Other", "\"License :: OSI Approved :: MIT License\", \"Operating System :: OS Independent\", \"Programming", "x: x * 255, hls_to_rgb(h / 256, 0.7, 1)), bg=True), ' '], disable_rgb=False),", "Color256 from colorsys import hls_to_rgb success = Iro((Color.GREEN, \"[ SUCCESS ]\")) error =", "ColorRGB(255, 0, 0, bg=True), \"[ ERROR ]\"), disable_rgb=False) warning = Iro((Color.YELLOW, Color256(255, bg=True),", "for h in range(256): print(Iro([ColorRGB(*map(lambda x: x * 255, hls_to_rgb(h / 256, 0.7,", "\"Intended Audience :: Developers\", \"License :: OSI Approved :: MIT License\", \"Operating System", "and powerful Colorizer for Python! 
Powered by [<NAME>](https://twitter.com/514YJ) [GitHub](https://github.com/nagataaaas/Iro) ```python from iro import", "print(success, 'code success.') print(error, 'code failed!!') print(warning, 'maybe something wrong.') print(deprecated, 'this function", "[GitHub](https://github.com/nagataaaas/Iro) ```python from iro import Iro, Color, Style, ColorRGB, Color256 from colorsys import", "bg=True), \"[ WARNING ]\")) deprecated = Iro((Color256(7), Color256(239, True), Style.STRIKE, \"[DEPRECATED]\")) print(success, 'code", "Color.BLUE, Color.BG_BRIGHT_YELLOW, Style.UNDERLINE, \"This is BLUE, BG_YELLOW and UNDERLINED.\" ], \" Finally back", ":: 3\", \"Programming Language :: Python\", \"Topic :: Software Development :: Libraries ::", "hls_to_rgb(h / 256, 0.7, 1)), bg=True), ' '], disable_rgb=False), end='') ``` **output** ![output](https://github.com/nagataaaas/Iro/blob/main/assets/capture1.png?raw=true)", "iro \"\"\" from setuptools import setup from os import path about = {}", "\"Programming Language :: Python\", \"Topic :: Software Development :: Libraries :: Python Modules\",", "long_description=__doc__, long_description_content_type=\"text/markdown\", install_requires=[], packages=[\"iro\"], zip_safe=True, platforms=\"any\", classifiers=[ \"Development Status :: 4 - Beta\",", "as f: exec(f.read(), about) here = path.abspath(path.dirname(__file__)) setup(name=about[\"__title__\"], version=about[\"__version__\"], url=about[\"__url__\"], license=about[\"__license__\"], author=about[\"__author__\"], author_email=about[\"__author_email__\"],", "course, You can nest styles. 
\", [ Style.ITALIC, \"This is RED and ITALIC.", "from setuptools import setup from os import path about = {} with open(\"iro/__about__.py\")", "Style.STRIKE, \"[DEPRECATED]\")) print(success, 'code success.') print(error, 'code failed!!') print(warning, 'maybe something wrong.') print(deprecated,", "= Iro((Color.WHITE, Style.DOUBLY_UNDERLINE, ColorRGB(255, 0, 0, bg=True), \"[ ERROR ]\"), disable_rgb=False) warning =", "BG_YELLOW and UNDERLINED.\" ], \" Finally back to only RED!\" ])) for h", "success.') print(error, 'code failed!!') print(warning, 'maybe something wrong.') print(deprecated, 'this function is deprecated.')", "failed!!') print(warning, 'maybe something wrong.') print(deprecated, 'this function is deprecated.') print(Iro([ Color.RED, \"Off", "= {} with open(\"iro/__about__.py\") as f: exec(f.read(), about) here = path.abspath(path.dirname(__file__)) setup(name=about[\"__title__\"], version=about[\"__version__\"],", "\"Development Status :: 4 - Beta\", \"Environment :: Other Environment\", \"Intended Audience ::", "\"This is RED and ITALIC. \" ], [ Color.BLUE, Color.BG_BRIGHT_YELLOW, Style.UNDERLINE, \"This is", "\"\"\" IRO === Easy and powerful Colorizer for Python! 
Powered by [<NAME>](https://twitter.com/514YJ) [GitHub](https://github.com/nagataaaas/Iro)", "Developers\", \"License :: OSI Approved :: MIT License\", \"Operating System :: OS Independent\",", "WARNING ]\")) deprecated = Iro((Color256(7), Color256(239, True), Style.STRIKE, \"[DEPRECATED]\")) print(success, 'code success.') print(error,", "255, hls_to_rgb(h / 256, 0.7, 1)), bg=True), ' '], disable_rgb=False), end='') ``` **output**", "{} with open(\"iro/__about__.py\") as f: exec(f.read(), about) here = path.abspath(path.dirname(__file__)) setup(name=about[\"__title__\"], version=about[\"__version__\"], url=about[\"__url__\"],", "Independent\", \"Programming Language :: Python :: 3\", \"Programming Language :: Python\", \"Topic ::", "Iro((Color.WHITE, Style.DOUBLY_UNDERLINE, ColorRGB(255, 0, 0, bg=True), \"[ ERROR ]\"), disable_rgb=False) warning = Iro((Color.YELLOW,", "platforms=\"any\", classifiers=[ \"Development Status :: 4 - Beta\", \"Environment :: Other Environment\", \"Intended", "in range(256): print(Iro([ColorRGB(*map(lambda x: x * 255, hls_to_rgb(h / 256, 0.7, 1)), bg=True),", "import setup from os import path about = {} with open(\"iro/__about__.py\") as f:", "= Iro((Color.YELLOW, Color256(255, bg=True), \"[ WARNING ]\")) deprecated = Iro((Color256(7), Color256(239, True), Style.STRIKE,", "Installation $ pip install iro \"\"\" from setuptools import setup from os import", "True), Style.STRIKE, \"[DEPRECATED]\")) print(success, 'code success.') print(error, 'code failed!!') print(warning, 'maybe something wrong.')", "version=about[\"__version__\"], url=about[\"__url__\"], license=about[\"__license__\"], author=about[\"__author__\"], author_email=about[\"__author_email__\"], description=about[\"__description__\"], long_description=__doc__, long_description_content_type=\"text/markdown\", install_requires=[], packages=[\"iro\"], zip_safe=True, platforms=\"any\", classifiers=[", "zip_safe=True, platforms=\"any\", classifiers=[ \"Development Status :: 4 - 
Beta\", \"Environment :: Other Environment\",", "about = {} with open(\"iro/__about__.py\") as f: exec(f.read(), about) here = path.abspath(path.dirname(__file__)) setup(name=about[\"__title__\"],", "Beta\", \"Environment :: Other Environment\", \"Intended Audience :: Developers\", \"License :: OSI Approved", "Easy and powerful Colorizer for Python! Powered by [<NAME>](https://twitter.com/514YJ) [GitHub](https://github.com/nagataaaas/Iro) ```python from iro", "is RED and ITALIC. \" ], [ Color.BLUE, Color.BG_BRIGHT_YELLOW, Style.UNDERLINE, \"This is BLUE,", ":: Other Environment\", \"Intended Audience :: Developers\", \"License :: OSI Approved :: MIT", "hls_to_rgb success = Iro((Color.GREEN, \"[ SUCCESS ]\")) error = Iro((Color.WHITE, Style.DOUBLY_UNDERLINE, ColorRGB(255, 0,", "[ Color.BLUE, Color.BG_BRIGHT_YELLOW, Style.UNDERLINE, \"This is BLUE, BG_YELLOW and UNDERLINED.\" ], \" Finally", "\"Off course, You can nest styles. \", [ Style.ITALIC, \"This is RED and", "\"[ ERROR ]\"), disable_rgb=False) warning = Iro((Color.YELLOW, Color256(255, bg=True), \"[ WARNING ]\")) deprecated", "Color256(239, True), Style.STRIKE, \"[DEPRECATED]\")) print(success, 'code success.') print(error, 'code failed!!') print(warning, 'maybe something", "h in range(256): print(Iro([ColorRGB(*map(lambda x: x * 255, hls_to_rgb(h / 256, 0.7, 1)),", "only RED!\" ])) for h in range(256): print(Iro([ColorRGB(*map(lambda x: x * 255, hls_to_rgb(h", "install_requires=[], packages=[\"iro\"], zip_safe=True, platforms=\"any\", classifiers=[ \"Development Status :: 4 - Beta\", \"Environment ::", "\"[ WARNING ]\")) deprecated = Iro((Color256(7), Color256(239, True), Style.STRIKE, \"[DEPRECATED]\")) print(success, 'code success.')", "print(error, 'code failed!!') print(warning, 'maybe something wrong.') print(deprecated, 'this function is deprecated.') print(Iro([", "], [ Color.BLUE, Color.BG_BRIGHT_YELLOW, Style.UNDERLINE, \"This is BLUE, BG_YELLOW and UNDERLINED.\" ], \"", "url=about[\"__url__\"], 
license=about[\"__license__\"], author=about[\"__author__\"], author_email=about[\"__author_email__\"], description=about[\"__description__\"], long_description=__doc__, long_description_content_type=\"text/markdown\", install_requires=[], packages=[\"iro\"], zip_safe=True, platforms=\"any\", classifiers=[ \"Development", "0, bg=True), \"[ ERROR ]\"), disable_rgb=False) warning = Iro((Color.YELLOW, Color256(255, bg=True), \"[ WARNING", "something wrong.') print(deprecated, 'this function is deprecated.') print(Iro([ Color.RED, \"Off course, You can", "MIT License\", \"Operating System :: OS Independent\", \"Programming Language :: Python :: 3\",", "path about = {} with open(\"iro/__about__.py\") as f: exec(f.read(), about) here = path.abspath(path.dirname(__file__))", "classifiers=[ \"Development Status :: 4 - Beta\", \"Environment :: Other Environment\", \"Intended Audience", "4 - Beta\", \"Environment :: Other Environment\", \"Intended Audience :: Developers\", \"License ::", "author_email=about[\"__author_email__\"], description=about[\"__description__\"], long_description=__doc__, long_description_content_type=\"text/markdown\", install_requires=[], packages=[\"iro\"], zip_safe=True, platforms=\"any\", classifiers=[ \"Development Status :: 4", "/ 256, 0.7, 1)), bg=True), ' '], disable_rgb=False), end='') ``` **output** ![output](https://github.com/nagataaaas/Iro/blob/main/assets/capture1.png?raw=true) #", ":: 4 - Beta\", \"Environment :: Other Environment\", \"Intended Audience :: Developers\", \"License", "Powered by [<NAME>](https://twitter.com/514YJ) [GitHub](https://github.com/nagataaaas/Iro) ```python from iro import Iro, Color, Style, ColorRGB, Color256", "Environment\", \"Intended Audience :: Developers\", \"License :: OSI Approved :: MIT License\", \"Operating", "about) here = path.abspath(path.dirname(__file__)) setup(name=about[\"__title__\"], version=about[\"__version__\"], url=about[\"__url__\"], license=about[\"__license__\"], 
author=about[\"__author__\"], author_email=about[\"__author_email__\"], description=about[\"__description__\"], long_description=__doc__, long_description_content_type=\"text/markdown\",", "Iro, Color, Style, ColorRGB, Color256 from colorsys import hls_to_rgb success = Iro((Color.GREEN, \"[", "- Beta\", \"Environment :: Other Environment\", \"Intended Audience :: Developers\", \"License :: OSI", "]\"), disable_rgb=False) warning = Iro((Color.YELLOW, Color256(255, bg=True), \"[ WARNING ]\")) deprecated = Iro((Color256(7),", "print(deprecated, 'this function is deprecated.') print(Iro([ Color.RED, \"Off course, You can nest styles.", "x * 255, hls_to_rgb(h / 256, 0.7, 1)), bg=True), ' '], disable_rgb=False), end='')", "Language :: Python :: 3\", \"Programming Language :: Python\", \"Topic :: Software Development", "Color, Style, ColorRGB, Color256 from colorsys import hls_to_rgb success = Iro((Color.GREEN, \"[ SUCCESS", "Color256(255, bg=True), \"[ WARNING ]\")) deprecated = Iro((Color256(7), Color256(239, True), Style.STRIKE, \"[DEPRECATED]\")) print(success,", "with open(\"iro/__about__.py\") as f: exec(f.read(), about) here = path.abspath(path.dirname(__file__)) setup(name=about[\"__title__\"], version=about[\"__version__\"], url=about[\"__url__\"], license=about[\"__license__\"],", "SUCCESS ]\")) error = Iro((Color.WHITE, Style.DOUBLY_UNDERLINE, ColorRGB(255, 0, 0, bg=True), \"[ ERROR ]\"),", "256, 0.7, 1)), bg=True), ' '], disable_rgb=False), end='') ``` **output** ![output](https://github.com/nagataaaas/Iro/blob/main/assets/capture1.png?raw=true) # Installation", "= Iro((Color256(7), Color256(239, True), Style.STRIKE, \"[DEPRECATED]\")) print(success, 'code success.') print(error, 'code failed!!') print(warning,", "deprecated.') print(Iro([ Color.RED, \"Off course, You can nest styles. 
\", [ Style.ITALIC, \"This", "= Iro((Color.GREEN, \"[ SUCCESS ]\")) error = Iro((Color.WHITE, Style.DOUBLY_UNDERLINE, ColorRGB(255, 0, 0, bg=True),", "Color.BG_BRIGHT_YELLOW, Style.UNDERLINE, \"This is BLUE, BG_YELLOW and UNDERLINED.\" ], \" Finally back to", "print(Iro([ColorRGB(*map(lambda x: x * 255, hls_to_rgb(h / 256, 0.7, 1)), bg=True), ' '],", "setuptools import setup from os import path about = {} with open(\"iro/__about__.py\") as", "OS Independent\", \"Programming Language :: Python :: 3\", \"Programming Language :: Python\", \"Topic", "from os import path about = {} with open(\"iro/__about__.py\") as f: exec(f.read(), about)", "* 255, hls_to_rgb(h / 256, 0.7, 1)), bg=True), ' '], disable_rgb=False), end='') ```", "end='') ``` **output** ![output](https://github.com/nagataaaas/Iro/blob/main/assets/capture1.png?raw=true) # Installation $ pip install iro \"\"\" from setuptools", "]\")) error = Iro((Color.WHITE, Style.DOUBLY_UNDERLINE, ColorRGB(255, 0, 0, bg=True), \"[ ERROR ]\"), disable_rgb=False)", "from colorsys import hls_to_rgb success = Iro((Color.GREEN, \"[ SUCCESS ]\")) error = Iro((Color.WHITE,", "' '], disable_rgb=False), end='') ``` **output** ![output](https://github.com/nagataaaas/Iro/blob/main/assets/capture1.png?raw=true) # Installation $ pip install iro", "iro import Iro, Color, Style, ColorRGB, Color256 from colorsys import hls_to_rgb success =", "License\", \"Operating System :: OS Independent\", \"Programming Language :: Python :: 3\", \"Programming", "ITALIC. 
\" ], [ Color.BLUE, Color.BG_BRIGHT_YELLOW, Style.UNDERLINE, \"This is BLUE, BG_YELLOW and UNDERLINED.\"", "path.abspath(path.dirname(__file__)) setup(name=about[\"__title__\"], version=about[\"__version__\"], url=about[\"__url__\"], license=about[\"__license__\"], author=about[\"__author__\"], author_email=about[\"__author_email__\"], description=about[\"__description__\"], long_description=__doc__, long_description_content_type=\"text/markdown\", install_requires=[], packages=[\"iro\"], zip_safe=True," ]
[ "\"\"\"Check if code is valid\"\"\" return code in [cls.AUTHORIZATION, cls.PING, cls.PRIVATE, cls.PUBLIC, cls.OK,", "def __repr__(self): return \"Message(code={}, uuid={}, content={}, destination={}, name={})\".format( self.code, str(self.uuid), self.content, self.destination, self.name)", "1 PING = 2 PRIVATE = 3 PUBLIC = 4 OK = 5", "self.content.encode() def __repr__(self): return \"Message(code={}, uuid={}, content={}, destination={}, name={})\".format( self.code, str(self.uuid), self.content, self.destination,", "\"\" if not cls.valid_code(code): raise ValidationError(\"invalid code\") uuid = UUID(bytes=uuid) if data_length >", "1024 # 1 byte for code, 16 for UUID, 1007 for content class", "return \"Message(code={}, uuid={}, content={}, destination={}, name={})\".format( self.code, str(self.uuid), self.content, self.destination, self.name) @classmethod def", "def __bytes__(self): \"\"\"Convert the message to bytes string\"\"\" return bytes([self.code]) + self.uuid.bytes +", "content = data[17:].decode() except UnicodeError: raise ValidationError(\"content is not a valid unicode string\")", "and return a message\"\"\" data_length = len(data) if data_length < 17 or data_length", "17: try: content = data[17:].decode() except UnicodeError: raise ValidationError(\"content is not a valid", "self.content, self.destination, self.name) @classmethod def valid_code(cls, code): \"\"\"Check if code is valid\"\"\" return", "cls.PING, cls.PRIVATE, cls.PUBLIC, cls.OK, cls.UNAUTHORIZED] @classmethod def parse(cls, data, destination): \"\"\"Parse binary data", "self.name = name def __bytes__(self): \"\"\"Convert the message to bytes string\"\"\" return bytes([self.code])", "data_length > MAX_MESSAGE_LENGTH: raise ValidationError(\"invalid message size: \"+str(data_length)) code, uuid, content = int(data[0]),", "> 17: try: content = data[17:].decode() except UnicodeError: raise ValidationError(\"content is not a", "content class ValidationError(Exception): pass class Message: 
AUTHORIZATION = 1 PING = 2 PRIVATE", "16 for UUID, 1007 for content class ValidationError(Exception): pass class Message: AUTHORIZATION =", "code): \"\"\"Check if code is valid\"\"\" return code in [cls.AUTHORIZATION, cls.PING, cls.PRIVATE, cls.PUBLIC,", "= name def __bytes__(self): \"\"\"Convert the message to bytes string\"\"\" return bytes([self.code]) +", "= data[17:].decode() except UnicodeError: raise ValidationError(\"content is not a valid unicode string\") return", "uuid4 MAX_MESSAGE_LENGTH = 1024 # 1 byte for code, 16 for UUID, 1007", "= 1024 # 1 byte for code, 16 for UUID, 1007 for content", "raise ValidationError(\"invalid message size: \"+str(data_length)) code, uuid, content = int(data[0]), data[1:17], \"\" if", "is valid\"\"\" return code in [cls.AUTHORIZATION, cls.PING, cls.PRIVATE, cls.PUBLIC, cls.OK, cls.UNAUTHORIZED] @classmethod def", "< 17 or data_length > MAX_MESSAGE_LENGTH: raise ValidationError(\"invalid message size: \"+str(data_length)) code, uuid,", "= len(data) if data_length < 17 or data_length > MAX_MESSAGE_LENGTH: raise ValidationError(\"invalid message", "cls.PUBLIC, cls.OK, cls.UNAUTHORIZED] @classmethod def parse(cls, data, destination): \"\"\"Parse binary data and return", "= 3 PUBLIC = 4 OK = 5 UNAUTHORIZED = 6 def __init__(self,", "= 6 def __init__(self, code=None, uuid=None, content=\"\", destination=None, \\ name=None): self.code = code", "UUID, 1007 for content class ValidationError(Exception): pass class Message: AUTHORIZATION = 1 PING", "raise ValidationError(\"invalid code\") uuid = UUID(bytes=uuid) if data_length > 17: try: content =", "= 2 PRIVATE = 3 PUBLIC = 4 OK = 5 UNAUTHORIZED =", "return code in [cls.AUTHORIZATION, cls.PING, cls.PRIVATE, cls.PUBLIC, cls.OK, cls.UNAUTHORIZED] @classmethod def parse(cls, data,", "try: content = data[17:].decode() except UnicodeError: raise ValidationError(\"content is not a valid unicode", "1007 for content class ValidationError(Exception): pass class Message: AUTHORIZATION = 1 PING 
=", "class Message: AUTHORIZATION = 1 PING = 2 PRIVATE = 3 PUBLIC =", "= 4 OK = 5 UNAUTHORIZED = 6 def __init__(self, code=None, uuid=None, content=\"\",", "name={})\".format( self.code, str(self.uuid), self.content, self.destination, self.name) @classmethod def valid_code(cls, code): \"\"\"Check if code", "code is valid\"\"\" return code in [cls.AUTHORIZATION, cls.PING, cls.PRIVATE, cls.PUBLIC, cls.OK, cls.UNAUTHORIZED] @classmethod", "__init__(self, code=None, uuid=None, content=\"\", destination=None, \\ name=None): self.code = code self.uuid = uuid", "except UnicodeError: raise ValidationError(\"content is not a valid unicode string\") return cls(code=code, uuid=uuid,", "ValidationError(\"invalid message size: \"+str(data_length)) code, uuid, content = int(data[0]), data[1:17], \"\" if not", "uuid = UUID(bytes=uuid) if data_length > 17: try: content = data[17:].decode() except UnicodeError:", "bytes([self.code]) + self.uuid.bytes + self.content.encode() def __repr__(self): return \"Message(code={}, uuid={}, content={}, destination={}, name={})\".format(", "= 5 UNAUTHORIZED = 6 def __init__(self, code=None, uuid=None, content=\"\", destination=None, \\ name=None):", "for code, 16 for UUID, 1007 for content class ValidationError(Exception): pass class Message:", "if not cls.valid_code(code): raise ValidationError(\"invalid code\") uuid = UUID(bytes=uuid) if data_length > 17:", "if data_length > 17: try: content = data[17:].decode() except UnicodeError: raise ValidationError(\"content is", "def valid_code(cls, code): \"\"\"Check if code is valid\"\"\" return code in [cls.AUTHORIZATION, cls.PING,", "PRIVATE = 3 PUBLIC = 4 OK = 5 UNAUTHORIZED = 6 def", "AUTHORIZATION = 1 PING = 2 PRIVATE = 3 PUBLIC = 4 OK", "cls.UNAUTHORIZED] @classmethod def parse(cls, data, destination): \"\"\"Parse binary data and return a message\"\"\"", "UUID, uuid4 MAX_MESSAGE_LENGTH = 1024 # 1 byte for code, 16 for UUID,", "content self.destination = destination self.name = name def 
__bytes__(self): \"\"\"Convert the message to", "a message\"\"\" data_length = len(data) if data_length < 17 or data_length > MAX_MESSAGE_LENGTH:", "ValidationError(Exception): pass class Message: AUTHORIZATION = 1 PING = 2 PRIVATE = 3", "= int(data[0]), data[1:17], \"\" if not cls.valid_code(code): raise ValidationError(\"invalid code\") uuid = UUID(bytes=uuid)", "+ self.content.encode() def __repr__(self): return \"Message(code={}, uuid={}, content={}, destination={}, name={})\".format( self.code, str(self.uuid), self.content,", "UUID(bytes=uuid) if data_length > 17: try: content = data[17:].decode() except UnicodeError: raise ValidationError(\"content", "data_length > 17: try: content = data[17:].decode() except UnicodeError: raise ValidationError(\"content is not", "message size: \"+str(data_length)) code, uuid, content = int(data[0]), data[1:17], \"\" if not cls.valid_code(code):", "self.name) @classmethod def valid_code(cls, code): \"\"\"Check if code is valid\"\"\" return code in", "data_length < 17 or data_length > MAX_MESSAGE_LENGTH: raise ValidationError(\"invalid message size: \"+str(data_length)) code,", "cls.valid_code(code): raise ValidationError(\"invalid code\") uuid = UUID(bytes=uuid) if data_length > 17: try: content", "destination self.name = name def __bytes__(self): \"\"\"Convert the message to bytes string\"\"\" return", "data, destination): \"\"\"Parse binary data and return a message\"\"\" data_length = len(data) if", "string\"\"\" return bytes([self.code]) + self.uuid.bytes + self.content.encode() def __repr__(self): return \"Message(code={}, uuid={}, content={},", "self.destination = destination self.name = name def __bytes__(self): \"\"\"Convert the message to bytes", "message to bytes string\"\"\" return bytes([self.code]) + self.uuid.bytes + self.content.encode() def __repr__(self): return", "MAX_MESSAGE_LENGTH: raise ValidationError(\"invalid message size: \"+str(data_length)) code, uuid, content = int(data[0]), data[1:17], \"\"", 
"class ValidationError(Exception): pass class Message: AUTHORIZATION = 1 PING = 2 PRIVATE =", "@classmethod def parse(cls, data, destination): \"\"\"Parse binary data and return a message\"\"\" data_length", "in [cls.AUTHORIZATION, cls.PING, cls.PRIVATE, cls.PUBLIC, cls.OK, cls.UNAUTHORIZED] @classmethod def parse(cls, data, destination): \"\"\"Parse", "__bytes__(self): \"\"\"Convert the message to bytes string\"\"\" return bytes([self.code]) + self.uuid.bytes + self.content.encode()", "bytes string\"\"\" return bytes([self.code]) + self.uuid.bytes + self.content.encode() def __repr__(self): return \"Message(code={}, uuid={},", "PING = 2 PRIVATE = 3 PUBLIC = 4 OK = 5 UNAUTHORIZED", "content=\"\", destination=None, \\ name=None): self.code = code self.uuid = uuid or uuid4() self.content", "cls.OK, cls.UNAUTHORIZED] @classmethod def parse(cls, data, destination): \"\"\"Parse binary data and return a", "# 1 byte for code, 16 for UUID, 1007 for content class ValidationError(Exception):", "byte for code, 16 for UUID, 1007 for content class ValidationError(Exception): pass class", "cls.PRIVATE, cls.PUBLIC, cls.OK, cls.UNAUTHORIZED] @classmethod def parse(cls, data, destination): \"\"\"Parse binary data and", "if data_length < 17 or data_length > MAX_MESSAGE_LENGTH: raise ValidationError(\"invalid message size: \"+str(data_length))", "code, uuid, content = int(data[0]), data[1:17], \"\" if not cls.valid_code(code): raise ValidationError(\"invalid code\")", "code, 16 for UUID, 1007 for content class ValidationError(Exception): pass class Message: AUTHORIZATION", "destination=None, \\ name=None): self.code = code self.uuid = uuid or uuid4() self.content =", "17 or data_length > MAX_MESSAGE_LENGTH: raise ValidationError(\"invalid message size: \"+str(data_length)) code, uuid, content", "6 def __init__(self, code=None, uuid=None, content=\"\", destination=None, \\ name=None): self.code = code self.uuid", "uuid, content = int(data[0]), data[1:17], \"\" if not 
cls.valid_code(code): raise ValidationError(\"invalid code\") uuid", "UNAUTHORIZED = 6 def __init__(self, code=None, uuid=None, content=\"\", destination=None, \\ name=None): self.code =", "size: \"+str(data_length)) code, uuid, content = int(data[0]), data[1:17], \"\" if not cls.valid_code(code): raise", "<gh_stars>1-10 from uuid import UUID, uuid4 MAX_MESSAGE_LENGTH = 1024 # 1 byte for", "self.code, str(self.uuid), self.content, self.destination, self.name) @classmethod def valid_code(cls, code): \"\"\"Check if code is", "uuid={}, content={}, destination={}, name={})\".format( self.code, str(self.uuid), self.content, self.destination, self.name) @classmethod def valid_code(cls, code):", "uuid import UUID, uuid4 MAX_MESSAGE_LENGTH = 1024 # 1 byte for code, 16", "5 UNAUTHORIZED = 6 def __init__(self, code=None, uuid=None, content=\"\", destination=None, \\ name=None): self.code", "str(self.uuid), self.content, self.destination, self.name) @classmethod def valid_code(cls, code): \"\"\"Check if code is valid\"\"\"", "@classmethod def valid_code(cls, code): \"\"\"Check if code is valid\"\"\" return code in [cls.AUTHORIZATION,", "not cls.valid_code(code): raise ValidationError(\"invalid code\") uuid = UUID(bytes=uuid) if data_length > 17: try:", "self.code = code self.uuid = uuid or uuid4() self.content = content self.destination =", "self.content = content self.destination = destination self.name = name def __bytes__(self): \"\"\"Convert the", "for content class ValidationError(Exception): pass class Message: AUTHORIZATION = 1 PING = 2", "__repr__(self): return \"Message(code={}, uuid={}, content={}, destination={}, name={})\".format( self.code, str(self.uuid), self.content, self.destination, self.name) @classmethod", "the message to bytes string\"\"\" return bytes([self.code]) + self.uuid.bytes + self.content.encode() def __repr__(self):", "def parse(cls, data, destination): \"\"\"Parse binary data and return a message\"\"\" data_length =", "destination): \"\"\"Parse 
binary data and return a message\"\"\" data_length = len(data) if data_length", "uuid=None, content=\"\", destination=None, \\ name=None): self.code = code self.uuid = uuid or uuid4()", "uuid4() self.content = content self.destination = destination self.name = name def __bytes__(self): \"\"\"Convert", "raise ValidationError(\"content is not a valid unicode string\") return cls(code=code, uuid=uuid, content=content, destination=destination)", "= uuid or uuid4() self.content = content self.destination = destination self.name = name", "uuid or uuid4() self.content = content self.destination = destination self.name = name def", "pass class Message: AUTHORIZATION = 1 PING = 2 PRIVATE = 3 PUBLIC", "code self.uuid = uuid or uuid4() self.content = content self.destination = destination self.name", "import UUID, uuid4 MAX_MESSAGE_LENGTH = 1024 # 1 byte for code, 16 for", "valid_code(cls, code): \"\"\"Check if code is valid\"\"\" return code in [cls.AUTHORIZATION, cls.PING, cls.PRIVATE,", "code in [cls.AUTHORIZATION, cls.PING, cls.PRIVATE, cls.PUBLIC, cls.OK, cls.UNAUTHORIZED] @classmethod def parse(cls, data, destination):", "int(data[0]), data[1:17], \"\" if not cls.valid_code(code): raise ValidationError(\"invalid code\") uuid = UUID(bytes=uuid) if", "= content self.destination = destination self.name = name def __bytes__(self): \"\"\"Convert the message", "4 OK = 5 UNAUTHORIZED = 6 def __init__(self, code=None, uuid=None, content=\"\", destination=None,", "\\ name=None): self.code = code self.uuid = uuid or uuid4() self.content = content", "\"\"\"Convert the message to bytes string\"\"\" return bytes([self.code]) + self.uuid.bytes + self.content.encode() def", "def __init__(self, code=None, uuid=None, content=\"\", destination=None, \\ name=None): self.code = code self.uuid =", "1 byte for code, 16 for UUID, 1007 for content class ValidationError(Exception): pass", "3 PUBLIC = 4 OK = 5 UNAUTHORIZED = 6 def __init__(self, code=None,", "= UUID(bytes=uuid) if data_length > 
17: try: content = data[17:].decode() except UnicodeError: raise", "return bytes([self.code]) + self.uuid.bytes + self.content.encode() def __repr__(self): return \"Message(code={}, uuid={}, content={}, destination={},", "for UUID, 1007 for content class ValidationError(Exception): pass class Message: AUTHORIZATION = 1", "> MAX_MESSAGE_LENGTH: raise ValidationError(\"invalid message size: \"+str(data_length)) code, uuid, content = int(data[0]), data[1:17],", "binary data and return a message\"\"\" data_length = len(data) if data_length < 17", "data[17:].decode() except UnicodeError: raise ValidationError(\"content is not a valid unicode string\") return cls(code=code,", "valid\"\"\" return code in [cls.AUTHORIZATION, cls.PING, cls.PRIVATE, cls.PUBLIC, cls.OK, cls.UNAUTHORIZED] @classmethod def parse(cls,", "ValidationError(\"invalid code\") uuid = UUID(bytes=uuid) if data_length > 17: try: content = data[17:].decode()", "= 1 PING = 2 PRIVATE = 3 PUBLIC = 4 OK =", "PUBLIC = 4 OK = 5 UNAUTHORIZED = 6 def __init__(self, code=None, uuid=None,", "name=None): self.code = code self.uuid = uuid or uuid4() self.content = content self.destination", "UnicodeError: raise ValidationError(\"content is not a valid unicode string\") return cls(code=code, uuid=uuid, content=content,", "self.uuid = uuid or uuid4() self.content = content self.destination = destination self.name =", "data[1:17], \"\" if not cls.valid_code(code): raise ValidationError(\"invalid code\") uuid = UUID(bytes=uuid) if data_length", "= destination self.name = name def __bytes__(self): \"\"\"Convert the message to bytes string\"\"\"", "code\") uuid = UUID(bytes=uuid) if data_length > 17: try: content = data[17:].decode() except", "+ self.uuid.bytes + self.content.encode() def __repr__(self): return \"Message(code={}, uuid={}, content={}, destination={}, name={})\".format( self.code,", "2 PRIVATE = 3 PUBLIC = 4 OK = 5 UNAUTHORIZED = 6", "OK = 5 UNAUTHORIZED = 6 def __init__(self, code=None, uuid=None, 
content=\"\", destination=None, \\", "from uuid import UUID, uuid4 MAX_MESSAGE_LENGTH = 1024 # 1 byte for code,", "or data_length > MAX_MESSAGE_LENGTH: raise ValidationError(\"invalid message size: \"+str(data_length)) code, uuid, content =", "content={}, destination={}, name={})\".format( self.code, str(self.uuid), self.content, self.destination, self.name) @classmethod def valid_code(cls, code): \"\"\"Check", "len(data) if data_length < 17 or data_length > MAX_MESSAGE_LENGTH: raise ValidationError(\"invalid message size:", "data_length = len(data) if data_length < 17 or data_length > MAX_MESSAGE_LENGTH: raise ValidationError(\"invalid", "\"Message(code={}, uuid={}, content={}, destination={}, name={})\".format( self.code, str(self.uuid), self.content, self.destination, self.name) @classmethod def valid_code(cls,", "destination={}, name={})\".format( self.code, str(self.uuid), self.content, self.destination, self.name) @classmethod def valid_code(cls, code): \"\"\"Check if", "to bytes string\"\"\" return bytes([self.code]) + self.uuid.bytes + self.content.encode() def __repr__(self): return \"Message(code={},", "data and return a message\"\"\" data_length = len(data) if data_length < 17 or", "message\"\"\" data_length = len(data) if data_length < 17 or data_length > MAX_MESSAGE_LENGTH: raise", "code=None, uuid=None, content=\"\", destination=None, \\ name=None): self.code = code self.uuid = uuid or", "= code self.uuid = uuid or uuid4() self.content = content self.destination = destination", "content = int(data[0]), data[1:17], \"\" if not cls.valid_code(code): raise ValidationError(\"invalid code\") uuid =", "Message: AUTHORIZATION = 1 PING = 2 PRIVATE = 3 PUBLIC = 4", "name def __bytes__(self): \"\"\"Convert the message to bytes string\"\"\" return bytes([self.code]) + self.uuid.bytes", "self.uuid.bytes + self.content.encode() def __repr__(self): return \"Message(code={}, uuid={}, content={}, destination={}, name={})\".format( self.code, str(self.uuid),", 
"self.destination, self.name) @classmethod def valid_code(cls, code): \"\"\"Check if code is valid\"\"\" return code", "or uuid4() self.content = content self.destination = destination self.name = name def __bytes__(self):", "return a message\"\"\" data_length = len(data) if data_length < 17 or data_length >", "[cls.AUTHORIZATION, cls.PING, cls.PRIVATE, cls.PUBLIC, cls.OK, cls.UNAUTHORIZED] @classmethod def parse(cls, data, destination): \"\"\"Parse binary", "\"+str(data_length)) code, uuid, content = int(data[0]), data[1:17], \"\" if not cls.valid_code(code): raise ValidationError(\"invalid", "\"\"\"Parse binary data and return a message\"\"\" data_length = len(data) if data_length <", "if code is valid\"\"\" return code in [cls.AUTHORIZATION, cls.PING, cls.PRIVATE, cls.PUBLIC, cls.OK, cls.UNAUTHORIZED]", "parse(cls, data, destination): \"\"\"Parse binary data and return a message\"\"\" data_length = len(data)", "MAX_MESSAGE_LENGTH = 1024 # 1 byte for code, 16 for UUID, 1007 for" ]
[ "r2d of.createVariable('ux_ad', 'f4', ('nTime', 'nCell'))[:] = ulist of.createVariable('uy_ad', 'f4', ('nTime', 'nCell'))[:] = vlist", "'f4', ('nTime', 'nCell'))[:] = vlist of.createVariable('h_ad', 'f4', ('nTime', 'nCell'))[:] = hlist of.createVariable('u_ad', 'f4',", "%H:%M}'.format(today), mpsw.u_ad.sum(), mpsw.h_ad.sum()) mpsw.u[0, 0] = fwdfile['u'][itimestep] mpsw.h[0, 0] = fwdfile['h'][itimestep] if (today", "initial_conditions(nml) mpsw.var_allocation_adj() mpsw.sw_mpas_init_block() clock_start, clock_end = clock_namelist(nml) # ----- adj initial conditions -----", "of.createVariable('uy_ad', 'f4', ('nTime', 'nCell'))[:] = vlist of.createVariable('h_ad', 'f4', ('nTime', 'nCell'))[:] = hlist of.createVariable('u_ad',", "unorm = np.array(unorm) r2d = 180. / np.pi outname = nml.file_output.replace('.nc', '.adj.nc') with", "nml = namelist(nmlname='namelist.sw.x1.10242') read_configs(nml) read_dims(nml) read_vars(nml) initial_conditions(nml) mpsw.var_allocation_adj() mpsw.sw_mpas_init_block() clock_start, clock_end = clock_namelist(nml)", "clock_start).total_seconds() / mpsw.config_dt) while today >= clock_start: print('{:%Y-%m-%d %H:%M}'.format(today), mpsw.u_ad.sum(), mpsw.h_ad.sum()) mpsw.u[0, 0]", "clock_start).total_seconds() % nml.output_interval == 0: ulist.append(mpsw.ureconstructzonal_ad[0].copy()) vlist.append(mpsw.ureconstructmeridional_ad[0].copy()) hlist.append(mpsw.h_ad[0, 0].copy()) unorm.append(mpsw.u_ad[0, 0].copy()) mpsw.sw_rk4_adj() itimestep", "adj initial conditions ----- mpsw.u_ad[0, 0, 0] = 1. mpsw.h_ad[0, 0, 0] =", "# ----- adj initial conditions ----- mpsw.u_ad[0, 0, 0] = 1. 
mpsw.h_ad[0, 0,", "nc import numpy as np sys.path.append('../src/') from module_sw_mpas import mpas_sw_module as mpsw from", "nc.Dataset(fwdname, 'r') # ----- end nonlinear state trajectory ----- nml = namelist(nmlname='namelist.sw.x1.10242') read_configs(nml)", "itimestep -= 1 today -= timedelta(seconds=int(mpsw.config_dt)) ulist, vlist = np.array(ulist), np.array(vlist) hlist =", "itimestep = int((clock_end - clock_start).total_seconds() / mpsw.config_dt) while today >= clock_start: print('{:%Y-%m-%d %H:%M}'.format(today),", "----- nml = namelist(nmlname='namelist.sw.x1.10242') read_configs(nml) read_dims(nml) read_vars(nml) initial_conditions(nml) mpsw.var_allocation_adj() mpsw.sw_mpas_init_block() clock_start, clock_end =", "initial conditions ----- today = clock_end ulist, vlist, hlist = [], [], []", "= namelist(nmlname='namelist.sw.x1.10242') read_configs(nml) read_dims(nml) read_vars(nml) initial_conditions(nml) mpsw.var_allocation_adj() mpsw.sw_mpas_init_block() clock_start, clock_end = clock_namelist(nml) #", "0] = 1. mpsw.h_ad[0, 0, 0] = 1. 
# ----- end adj initial", "with nc.Dataset(outname, 'w') as of: of.createDimension('nTime', hlist.shape[0]) of.createDimension('nCell', mpsw.latcell.shape[0]) of.createDimension('nEdge', mpsw.latedge.shape[0]) of.createVariable('latCell', 'f4',", "timedelta import netCDF4 as nc import numpy as np sys.path.append('../src/') from module_sw_mpas import", "== 0: ulist.append(mpsw.ureconstructzonal_ad[0].copy()) vlist.append(mpsw.ureconstructmeridional_ad[0].copy()) hlist.append(mpsw.h_ad[0, 0].copy()) unorm.append(mpsw.u_ad[0, 0].copy()) mpsw.sw_rk4_adj() itimestep -= 1 today", "of: of.createDimension('nTime', hlist.shape[0]) of.createDimension('nCell', mpsw.latcell.shape[0]) of.createDimension('nEdge', mpsw.latedge.shape[0]) of.createVariable('latCell', 'f4', ('nCell'))[:] = mpsw.latcell *", "as nc import numpy as np sys.path.append('../src/') from module_sw_mpas import mpas_sw_module as mpsw", "----- end adj initial conditions ----- today = clock_end ulist, vlist, hlist =", "mpas_sw_driver import read_configs, read_dims, read_vars, \\ initial_conditions, clock_namelist def run_sw_adj(): # ----- nonlinear", "conditions ----- mpsw.u_ad[0, 0, 0] = 1. mpsw.h_ad[0, 0, 0] = 1. #", "'f4', ('nCell'))[:] = mpsw.loncell * r2d of.createVariable('latEdge', 'f4', ('nEdge'))[:] = mpsw.latedge * r2d", "clock_namelist(nml) # ----- adj initial conditions ----- mpsw.u_ad[0, 0, 0] = 1. mpsw.h_ad[0,", "% nml.output_interval == 0: ulist.append(mpsw.ureconstructzonal_ad[0].copy()) vlist.append(mpsw.ureconstructmeridional_ad[0].copy()) hlist.append(mpsw.h_ad[0, 0].copy()) unorm.append(mpsw.u_ad[0, 0].copy()) mpsw.sw_rk4_adj() itimestep -=", "= 1. mpsw.h_ad[0, 0, 0] = 1. 
# ----- end adj initial conditions", "('nCell'))[:] = mpsw.loncell * r2d of.createVariable('latEdge', 'f4', ('nEdge'))[:] = mpsw.latedge * r2d of.createVariable('lonEdge',", "import mpas_sw_module as mpsw from mpas_namelist import namelist from mpas_sw_driver import read_configs, read_dims,", "mpsw.latcell * r2d of.createVariable('lonCell', 'f4', ('nCell'))[:] = mpsw.loncell * r2d of.createVariable('latEdge', 'f4', ('nEdge'))[:]", "initial_conditions, clock_namelist def run_sw_adj(): # ----- nonlinear state trajectory ----- fwdname = 'x1.10242.state.nc'", "datetime import timedelta import netCDF4 as nc import numpy as np sys.path.append('../src/') from", "mpsw.sw_rk4_adj() itimestep -= 1 today -= timedelta(seconds=int(mpsw.config_dt)) ulist, vlist = np.array(ulist), np.array(vlist) hlist", "('nCell'))[:] = mpsw.latcell * r2d of.createVariable('lonCell', 'f4', ('nCell'))[:] = mpsw.loncell * r2d of.createVariable('latEdge',", "mpsw.latedge.shape[0]) of.createVariable('latCell', 'f4', ('nCell'))[:] = mpsw.latcell * r2d of.createVariable('lonCell', 'f4', ('nCell'))[:] = mpsw.loncell", "netCDF4 as nc import numpy as np sys.path.append('../src/') from module_sw_mpas import mpas_sw_module as", "np.array(vlist) hlist = np.array(hlist) unorm = np.array(unorm) r2d = 180. 
/ np.pi outname", "0] = fwdfile['h'][itimestep] if (today - clock_start).total_seconds() % nml.output_interval == 0: ulist.append(mpsw.ureconstructzonal_ad[0].copy()) vlist.append(mpsw.ureconstructmeridional_ad[0].copy())", "('nTime', 'nCell'))[:] = hlist of.createVariable('u_ad', 'f4', ('nTime', 'nEdge'))[:] = unorm if __name__ ==", "/ np.pi outname = nml.file_output.replace('.nc', '.adj.nc') with nc.Dataset(outname, 'w') as of: of.createDimension('nTime', hlist.shape[0])", "'f4', ('nTime', 'nCell'))[:] = hlist of.createVariable('u_ad', 'f4', ('nTime', 'nEdge'))[:] = unorm if __name__", "int((clock_end - clock_start).total_seconds() / mpsw.config_dt) while today >= clock_start: print('{:%Y-%m-%d %H:%M}'.format(today), mpsw.u_ad.sum(), mpsw.h_ad.sum())", "numpy as np sys.path.append('../src/') from module_sw_mpas import mpas_sw_module as mpsw from mpas_namelist import", "'x1.10242.state.nc' fwdfile = nc.Dataset(fwdname, 'r') # ----- end nonlinear state trajectory ----- nml", "# ----- end nonlinear state trajectory ----- nml = namelist(nmlname='namelist.sw.x1.10242') read_configs(nml) read_dims(nml) read_vars(nml)", "= np.array(ulist), np.array(vlist) hlist = np.array(hlist) unorm = np.array(unorm) r2d = 180. /", "= 180. / np.pi outname = nml.file_output.replace('.nc', '.adj.nc') with nc.Dataset(outname, 'w') as of:", "vlist.append(mpsw.ureconstructmeridional_ad[0].copy()) hlist.append(mpsw.h_ad[0, 0].copy()) unorm.append(mpsw.u_ad[0, 0].copy()) mpsw.sw_rk4_adj() itimestep -= 1 today -= timedelta(seconds=int(mpsw.config_dt)) ulist,", "1 today -= timedelta(seconds=int(mpsw.config_dt)) ulist, vlist = np.array(ulist), np.array(vlist) hlist = np.array(hlist) unorm", "today -= timedelta(seconds=int(mpsw.config_dt)) ulist, vlist = np.array(ulist), np.array(vlist) hlist = np.array(hlist) unorm =", "----- adj initial conditions ----- mpsw.u_ad[0, 0, 0] = 1. 
mpsw.h_ad[0, 0, 0]", "0] = fwdfile['u'][itimestep] mpsw.h[0, 0] = fwdfile['h'][itimestep] if (today - clock_start).total_seconds() % nml.output_interval", "'f4', ('nCell'))[:] = mpsw.latcell * r2d of.createVariable('lonCell', 'f4', ('nCell'))[:] = mpsw.loncell * r2d", "run_sw_adj(): # ----- nonlinear state trajectory ----- fwdname = 'x1.10242.state.nc' fwdfile = nc.Dataset(fwdname,", "fwdfile['u'][itimestep] mpsw.h[0, 0] = fwdfile['h'][itimestep] if (today - clock_start).total_seconds() % nml.output_interval == 0:", "vlist = np.array(ulist), np.array(vlist) hlist = np.array(hlist) unorm = np.array(unorm) r2d = 180.", "0, 0] = 1. mpsw.h_ad[0, 0, 0] = 1. # ----- end adj", "of.createVariable('latEdge', 'f4', ('nEdge'))[:] = mpsw.latedge * r2d of.createVariable('lonEdge', 'f4', ('nEdge'))[:] = mpsw.lonedge *", "of.createVariable('h_ad', 'f4', ('nTime', 'nCell'))[:] = hlist of.createVariable('u_ad', 'f4', ('nTime', 'nEdge'))[:] = unorm if", "= 'x1.10242.state.nc' fwdfile = nc.Dataset(fwdname, 'r') # ----- end nonlinear state trajectory -----", "# ----- nonlinear state trajectory ----- fwdname = 'x1.10242.state.nc' fwdfile = nc.Dataset(fwdname, 'r')", "[], [], [] unorm = [] itimestep = int((clock_end - clock_start).total_seconds() / mpsw.config_dt)", "'nCell'))[:] = vlist of.createVariable('h_ad', 'f4', ('nTime', 'nCell'))[:] = hlist of.createVariable('u_ad', 'f4', ('nTime', 'nEdge'))[:]", "mpsw.latcell.shape[0]) of.createDimension('nEdge', mpsw.latedge.shape[0]) of.createVariable('latCell', 'f4', ('nCell'))[:] = mpsw.latcell * r2d of.createVariable('lonCell', 'f4', ('nCell'))[:]", "read_vars(nml) initial_conditions(nml) mpsw.var_allocation_adj() mpsw.sw_mpas_init_block() clock_start, clock_end = clock_namelist(nml) # ----- adj initial conditions", "('nEdge'))[:] = mpsw.lonedge * r2d of.createVariable('ux_ad', 'f4', ('nTime', 'nCell'))[:] = ulist of.createVariable('uy_ad', 'f4',", "1. 
# ----- end adj initial conditions ----- today = clock_end ulist, vlist,", "ulist, vlist, hlist = [], [], [] unorm = [] itimestep = int((clock_end", "mpsw.config_dt) while today >= clock_start: print('{:%Y-%m-%d %H:%M}'.format(today), mpsw.u_ad.sum(), mpsw.h_ad.sum()) mpsw.u[0, 0] = fwdfile['u'][itimestep]", "mpsw.var_allocation_adj() mpsw.sw_mpas_init_block() clock_start, clock_end = clock_namelist(nml) # ----- adj initial conditions ----- mpsw.u_ad[0,", "mpsw.sw_mpas_init_block() clock_start, clock_end = clock_namelist(nml) # ----- adj initial conditions ----- mpsw.u_ad[0, 0,", "0: ulist.append(mpsw.ureconstructzonal_ad[0].copy()) vlist.append(mpsw.ureconstructmeridional_ad[0].copy()) hlist.append(mpsw.h_ad[0, 0].copy()) unorm.append(mpsw.u_ad[0, 0].copy()) mpsw.sw_rk4_adj() itimestep -= 1 today -=", "if (today - clock_start).total_seconds() % nml.output_interval == 0: ulist.append(mpsw.ureconstructzonal_ad[0].copy()) vlist.append(mpsw.ureconstructmeridional_ad[0].copy()) hlist.append(mpsw.h_ad[0, 0].copy()) unorm.append(mpsw.u_ad[0,", "np sys.path.append('../src/') from module_sw_mpas import mpas_sw_module as mpsw from mpas_namelist import namelist from", "hlist = [], [], [] unorm = [] itimestep = int((clock_end - clock_start).total_seconds()", "= [] itimestep = int((clock_end - clock_start).total_seconds() / mpsw.config_dt) while today >= clock_start:", "# ----- end adj initial conditions ----- today = clock_end ulist, vlist, hlist", "nml.file_output.replace('.nc', '.adj.nc') with nc.Dataset(outname, 'w') as of: of.createDimension('nTime', hlist.shape[0]) of.createDimension('nCell', mpsw.latcell.shape[0]) of.createDimension('nEdge', mpsw.latedge.shape[0])", "('nTime', 'nCell'))[:] = vlist of.createVariable('h_ad', 'f4', ('nTime', 'nCell'))[:] = hlist of.createVariable('u_ad', 'f4', ('nTime',", "np.array(unorm) r2d = 180. 
/ np.pi outname = nml.file_output.replace('.nc', '.adj.nc') with nc.Dataset(outname, 'w')", "end nonlinear state trajectory ----- nml = namelist(nmlname='namelist.sw.x1.10242') read_configs(nml) read_dims(nml) read_vars(nml) initial_conditions(nml) mpsw.var_allocation_adj()", "namelist(nmlname='namelist.sw.x1.10242') read_configs(nml) read_dims(nml) read_vars(nml) initial_conditions(nml) mpsw.var_allocation_adj() mpsw.sw_mpas_init_block() clock_start, clock_end = clock_namelist(nml) # -----", "of.createVariable('ux_ad', 'f4', ('nTime', 'nCell'))[:] = ulist of.createVariable('uy_ad', 'f4', ('nTime', 'nCell'))[:] = vlist of.createVariable('h_ad',", "'r') # ----- end nonlinear state trajectory ----- nml = namelist(nmlname='namelist.sw.x1.10242') read_configs(nml) read_dims(nml)", "import netCDF4 as nc import numpy as np sys.path.append('../src/') from module_sw_mpas import mpas_sw_module", "def run_sw_adj(): # ----- nonlinear state trajectory ----- fwdname = 'x1.10242.state.nc' fwdfile =", "= nc.Dataset(fwdname, 'r') # ----- end nonlinear state trajectory ----- nml = namelist(nmlname='namelist.sw.x1.10242')", "unorm.append(mpsw.u_ad[0, 0].copy()) mpsw.sw_rk4_adj() itimestep -= 1 today -= timedelta(seconds=int(mpsw.config_dt)) ulist, vlist = np.array(ulist),", "hlist = np.array(hlist) unorm = np.array(unorm) r2d = 180. / np.pi outname =", "read_configs, read_dims, read_vars, \\ initial_conditions, clock_namelist def run_sw_adj(): # ----- nonlinear state trajectory", "----- nonlinear state trajectory ----- fwdname = 'x1.10242.state.nc' fwdfile = nc.Dataset(fwdname, 'r') #", "adj initial conditions ----- today = clock_end ulist, vlist, hlist = [], [],", "fwdname = 'x1.10242.state.nc' fwdfile = nc.Dataset(fwdname, 'r') # ----- end nonlinear state trajectory", "import read_configs, read_dims, read_vars, \\ initial_conditions, clock_namelist def run_sw_adj(): # ----- nonlinear state", "mpsw.h_ad[0, 0, 0] = 1. 
# ----- end adj initial conditions ----- today", "hlist.shape[0]) of.createDimension('nCell', mpsw.latcell.shape[0]) of.createDimension('nEdge', mpsw.latedge.shape[0]) of.createVariable('latCell', 'f4', ('nCell'))[:] = mpsw.latcell * r2d of.createVariable('lonCell',", "np.array(ulist), np.array(vlist) hlist = np.array(hlist) unorm = np.array(unorm) r2d = 180. / np.pi", "= [], [], [] unorm = [] itimestep = int((clock_end - clock_start).total_seconds() /", "= clock_namelist(nml) # ----- adj initial conditions ----- mpsw.u_ad[0, 0, 0] = 1.", "nc.Dataset(outname, 'w') as of: of.createDimension('nTime', hlist.shape[0]) of.createDimension('nCell', mpsw.latcell.shape[0]) of.createDimension('nEdge', mpsw.latedge.shape[0]) of.createVariable('latCell', 'f4', ('nCell'))[:]", "= vlist of.createVariable('h_ad', 'f4', ('nTime', 'nCell'))[:] = hlist of.createVariable('u_ad', 'f4', ('nTime', 'nEdge'))[:] =", "unorm = [] itimestep = int((clock_end - clock_start).total_seconds() / mpsw.config_dt) while today >=", "as mpsw from mpas_namelist import namelist from mpas_sw_driver import read_configs, read_dims, read_vars, \\", "= ulist of.createVariable('uy_ad', 'f4', ('nTime', 'nCell'))[:] = vlist of.createVariable('h_ad', 'f4', ('nTime', 'nCell'))[:] =", "/ mpsw.config_dt) while today >= clock_start: print('{:%Y-%m-%d %H:%M}'.format(today), mpsw.u_ad.sum(), mpsw.h_ad.sum()) mpsw.u[0, 0] =", "= mpsw.loncell * r2d of.createVariable('latEdge', 'f4', ('nEdge'))[:] = mpsw.latedge * r2d of.createVariable('lonEdge', 'f4',", "ulist, vlist = np.array(ulist), np.array(vlist) hlist = np.array(hlist) unorm = np.array(unorm) r2d =", "module_sw_mpas import mpas_sw_module as mpsw from mpas_namelist import namelist from mpas_sw_driver import read_configs,", "of.createVariable('lonEdge', 'f4', ('nEdge'))[:] = mpsw.lonedge * r2d of.createVariable('ux_ad', 'f4', ('nTime', 'nCell'))[:] = ulist", "hlist.append(mpsw.h_ad[0, 0].copy()) unorm.append(mpsw.u_ad[0, 0].copy()) mpsw.sw_rk4_adj() itimestep -= 1 
today -= timedelta(seconds=int(mpsw.config_dt)) ulist, vlist", "of.createDimension('nTime', hlist.shape[0]) of.createDimension('nCell', mpsw.latcell.shape[0]) of.createDimension('nEdge', mpsw.latedge.shape[0]) of.createVariable('latCell', 'f4', ('nCell'))[:] = mpsw.latcell * r2d", "initial conditions ----- mpsw.u_ad[0, 0, 0] = 1. mpsw.h_ad[0, 0, 0] = 1.", "'nCell'))[:] = ulist of.createVariable('uy_ad', 'f4', ('nTime', 'nCell'))[:] = vlist of.createVariable('h_ad', 'f4', ('nTime', 'nCell'))[:]", "'nCell'))[:] = hlist of.createVariable('u_ad', 'f4', ('nTime', 'nEdge'))[:] = unorm if __name__ == '__main__':", "conditions ----- today = clock_end ulist, vlist, hlist = [], [], [] unorm", "mpsw from mpas_namelist import namelist from mpas_sw_driver import read_configs, read_dims, read_vars, \\ initial_conditions,", "fwdfile = nc.Dataset(fwdname, 'r') # ----- end nonlinear state trajectory ----- nml =", "clock_end ulist, vlist, hlist = [], [], [] unorm = [] itimestep =", "print('{:%Y-%m-%d %H:%M}'.format(today), mpsw.u_ad.sum(), mpsw.h_ad.sum()) mpsw.u[0, 0] = fwdfile['u'][itimestep] mpsw.h[0, 0] = fwdfile['h'][itimestep] if", "from module_sw_mpas import mpas_sw_module as mpsw from mpas_namelist import namelist from mpas_sw_driver import", "trajectory ----- fwdname = 'x1.10242.state.nc' fwdfile = nc.Dataset(fwdname, 'r') # ----- end nonlinear", "timedelta(seconds=int(mpsw.config_dt)) ulist, vlist = np.array(ulist), np.array(vlist) hlist = np.array(hlist) unorm = np.array(unorm) r2d", "* r2d of.createVariable('latEdge', 'f4', ('nEdge'))[:] = mpsw.latedge * r2d of.createVariable('lonEdge', 'f4', ('nEdge'))[:] =", "mpas_sw_module as mpsw from mpas_namelist import namelist from mpas_sw_driver import read_configs, read_dims, read_vars,", "'f4', ('nEdge'))[:] = mpsw.latedge * r2d of.createVariable('lonEdge', 'f4', ('nEdge'))[:] = mpsw.lonedge * r2d", "* r2d of.createVariable('ux_ad', 'f4', ('nTime', 'nCell'))[:] = ulist of.createVariable('uy_ad', 'f4', ('nTime', 'nCell'))[:] 
=", "= mpsw.latcell * r2d of.createVariable('lonCell', 'f4', ('nCell'))[:] = mpsw.loncell * r2d of.createVariable('latEdge', 'f4',", "- clock_start).total_seconds() / mpsw.config_dt) while today >= clock_start: print('{:%Y-%m-%d %H:%M}'.format(today), mpsw.u_ad.sum(), mpsw.h_ad.sum()) mpsw.u[0,", "[] itimestep = int((clock_end - clock_start).total_seconds() / mpsw.config_dt) while today >= clock_start: print('{:%Y-%m-%d", "----- fwdname = 'x1.10242.state.nc' fwdfile = nc.Dataset(fwdname, 'r') # ----- end nonlinear state", "fwdfile['h'][itimestep] if (today - clock_start).total_seconds() % nml.output_interval == 0: ulist.append(mpsw.ureconstructzonal_ad[0].copy()) vlist.append(mpsw.ureconstructmeridional_ad[0].copy()) hlist.append(mpsw.h_ad[0, 0].copy())", "read_configs(nml) read_dims(nml) read_vars(nml) initial_conditions(nml) mpsw.var_allocation_adj() mpsw.sw_mpas_init_block() clock_start, clock_end = clock_namelist(nml) # ----- adj", "[], [] unorm = [] itimestep = int((clock_end - clock_start).total_seconds() / mpsw.config_dt) while", "[] unorm = [] itimestep = int((clock_end - clock_start).total_seconds() / mpsw.config_dt) while today", "sys.path.append('../src/') from module_sw_mpas import mpas_sw_module as mpsw from mpas_namelist import namelist from mpas_sw_driver", "-= 1 today -= timedelta(seconds=int(mpsw.config_dt)) ulist, vlist = np.array(ulist), np.array(vlist) hlist = np.array(hlist)", "'f4', ('nTime', 'nCell'))[:] = ulist of.createVariable('uy_ad', 'f4', ('nTime', 'nCell'))[:] = vlist of.createVariable('h_ad', 'f4',", "nonlinear state trajectory ----- nml = namelist(nmlname='namelist.sw.x1.10242') read_configs(nml) read_dims(nml) read_vars(nml) initial_conditions(nml) mpsw.var_allocation_adj() mpsw.sw_mpas_init_block()", "<reponame>xtian15/MPAS-SW-TL-AD import sys from datetime import timedelta import netCDF4 as nc import numpy", "of.createDimension('nCell', mpsw.latcell.shape[0]) of.createDimension('nEdge', mpsw.latedge.shape[0]) 
of.createVariable('latCell', 'f4', ('nCell'))[:] = mpsw.latcell * r2d of.createVariable('lonCell', 'f4',", "today = clock_end ulist, vlist, hlist = [], [], [] unorm = []", "import sys from datetime import timedelta import netCDF4 as nc import numpy as", "namelist from mpas_sw_driver import read_configs, read_dims, read_vars, \\ initial_conditions, clock_namelist def run_sw_adj(): #", "from datetime import timedelta import netCDF4 as nc import numpy as np sys.path.append('../src/')", "from mpas_namelist import namelist from mpas_sw_driver import read_configs, read_dims, read_vars, \\ initial_conditions, clock_namelist", "clock_start: print('{:%Y-%m-%d %H:%M}'.format(today), mpsw.u_ad.sum(), mpsw.h_ad.sum()) mpsw.u[0, 0] = fwdfile['u'][itimestep] mpsw.h[0, 0] = fwdfile['h'][itimestep]", ">= clock_start: print('{:%Y-%m-%d %H:%M}'.format(today), mpsw.u_ad.sum(), mpsw.h_ad.sum()) mpsw.u[0, 0] = fwdfile['u'][itimestep] mpsw.h[0, 0] =", "vlist of.createVariable('h_ad', 'f4', ('nTime', 'nCell'))[:] = hlist of.createVariable('u_ad', 'f4', ('nTime', 'nEdge'))[:] = unorm", "read_vars, \\ initial_conditions, clock_namelist def run_sw_adj(): # ----- nonlinear state trajectory ----- fwdname", "trajectory ----- nml = namelist(nmlname='namelist.sw.x1.10242') read_configs(nml) read_dims(nml) read_vars(nml) initial_conditions(nml) mpsw.var_allocation_adj() mpsw.sw_mpas_init_block() clock_start, clock_end", "mpsw.u_ad[0, 0, 0] = 1. mpsw.h_ad[0, 0, 0] = 1. # ----- end", "= int((clock_end - clock_start).total_seconds() / mpsw.config_dt) while today >= clock_start: print('{:%Y-%m-%d %H:%M}'.format(today), mpsw.u_ad.sum(),", "np.array(hlist) unorm = np.array(unorm) r2d = 180. / np.pi outname = nml.file_output.replace('.nc', '.adj.nc')", "180. 
/ np.pi outname = nml.file_output.replace('.nc', '.adj.nc') with nc.Dataset(outname, 'w') as of: of.createDimension('nTime',", "r2d of.createVariable('lonCell', 'f4', ('nCell'))[:] = mpsw.loncell * r2d of.createVariable('latEdge', 'f4', ('nEdge'))[:] = mpsw.latedge", "= mpsw.latedge * r2d of.createVariable('lonEdge', 'f4', ('nEdge'))[:] = mpsw.lonedge * r2d of.createVariable('ux_ad', 'f4',", "----- end nonlinear state trajectory ----- nml = namelist(nmlname='namelist.sw.x1.10242') read_configs(nml) read_dims(nml) read_vars(nml) initial_conditions(nml)", "as np sys.path.append('../src/') from module_sw_mpas import mpas_sw_module as mpsw from mpas_namelist import namelist", "clock_namelist def run_sw_adj(): # ----- nonlinear state trajectory ----- fwdname = 'x1.10242.state.nc' fwdfile", "read_dims(nml) read_vars(nml) initial_conditions(nml) mpsw.var_allocation_adj() mpsw.sw_mpas_init_block() clock_start, clock_end = clock_namelist(nml) # ----- adj initial", "'w') as of: of.createDimension('nTime', hlist.shape[0]) of.createDimension('nCell', mpsw.latcell.shape[0]) of.createDimension('nEdge', mpsw.latedge.shape[0]) of.createVariable('latCell', 'f4', ('nCell'))[:] =", "outname = nml.file_output.replace('.nc', '.adj.nc') with nc.Dataset(outname, 'w') as of: of.createDimension('nTime', hlist.shape[0]) of.createDimension('nCell', mpsw.latcell.shape[0])", "-= timedelta(seconds=int(mpsw.config_dt)) ulist, vlist = np.array(ulist), np.array(vlist) hlist = np.array(hlist) unorm = np.array(unorm)", "mpas_namelist import namelist from mpas_sw_driver import read_configs, read_dims, read_vars, \\ initial_conditions, clock_namelist def", "= fwdfile['h'][itimestep] if (today - clock_start).total_seconds() % nml.output_interval == 0: ulist.append(mpsw.ureconstructzonal_ad[0].copy()) vlist.append(mpsw.ureconstructmeridional_ad[0].copy()) hlist.append(mpsw.h_ad[0,", "('nEdge'))[:] = mpsw.latedge * r2d of.createVariable('lonEdge', 'f4', ('nEdge'))[:] = mpsw.lonedge * r2d 
of.createVariable('ux_ad',", "0, 0] = 1. # ----- end adj initial conditions ----- today =", "vlist, hlist = [], [], [] unorm = [] itimestep = int((clock_end -", "\\ initial_conditions, clock_namelist def run_sw_adj(): # ----- nonlinear state trajectory ----- fwdname =", "'.adj.nc') with nc.Dataset(outname, 'w') as of: of.createDimension('nTime', hlist.shape[0]) of.createDimension('nCell', mpsw.latcell.shape[0]) of.createDimension('nEdge', mpsw.latedge.shape[0]) of.createVariable('latCell',", "mpsw.u_ad.sum(), mpsw.h_ad.sum()) mpsw.u[0, 0] = fwdfile['u'][itimestep] mpsw.h[0, 0] = fwdfile['h'][itimestep] if (today -", "state trajectory ----- nml = namelist(nmlname='namelist.sw.x1.10242') read_configs(nml) read_dims(nml) read_vars(nml) initial_conditions(nml) mpsw.var_allocation_adj() mpsw.sw_mpas_init_block() clock_start,", "mpsw.u[0, 0] = fwdfile['u'][itimestep] mpsw.h[0, 0] = fwdfile['h'][itimestep] if (today - clock_start).total_seconds() %", "today >= clock_start: print('{:%Y-%m-%d %H:%M}'.format(today), mpsw.u_ad.sum(), mpsw.h_ad.sum()) mpsw.u[0, 0] = fwdfile['u'][itimestep] mpsw.h[0, 0]", "state trajectory ----- fwdname = 'x1.10242.state.nc' fwdfile = nc.Dataset(fwdname, 'r') # ----- end", "----- today = clock_end ulist, vlist, hlist = [], [], [] unorm =", "of.createVariable('latCell', 'f4', ('nCell'))[:] = mpsw.latcell * r2d of.createVariable('lonCell', 'f4', ('nCell'))[:] = mpsw.loncell *", "= mpsw.lonedge * r2d of.createVariable('ux_ad', 'f4', ('nTime', 'nCell'))[:] = ulist of.createVariable('uy_ad', 'f4', ('nTime',", "mpsw.lonedge * r2d of.createVariable('ux_ad', 'f4', ('nTime', 'nCell'))[:] = ulist of.createVariable('uy_ad', 'f4', ('nTime', 'nCell'))[:]", "= clock_end ulist, vlist, hlist = [], [], [] unorm = [] itimestep", "ulist of.createVariable('uy_ad', 'f4', ('nTime', 'nCell'))[:] = vlist of.createVariable('h_ad', 'f4', ('nTime', 'nCell'))[:] = hlist", "while today >= clock_start: print('{:%Y-%m-%d %H:%M}'.format(today), mpsw.u_ad.sum(), 
mpsw.h_ad.sum()) mpsw.u[0, 0] = fwdfile['u'][itimestep] mpsw.h[0,", "1. mpsw.h_ad[0, 0, 0] = 1. # ----- end adj initial conditions -----", "= fwdfile['u'][itimestep] mpsw.h[0, 0] = fwdfile['h'][itimestep] if (today - clock_start).total_seconds() % nml.output_interval ==", "from mpas_sw_driver import read_configs, read_dims, read_vars, \\ initial_conditions, clock_namelist def run_sw_adj(): # -----", "nml.output_interval == 0: ulist.append(mpsw.ureconstructzonal_ad[0].copy()) vlist.append(mpsw.ureconstructmeridional_ad[0].copy()) hlist.append(mpsw.h_ad[0, 0].copy()) unorm.append(mpsw.u_ad[0, 0].copy()) mpsw.sw_rk4_adj() itimestep -= 1", "= nml.file_output.replace('.nc', '.adj.nc') with nc.Dataset(outname, 'w') as of: of.createDimension('nTime', hlist.shape[0]) of.createDimension('nCell', mpsw.latcell.shape[0]) of.createDimension('nEdge',", "r2d of.createVariable('latEdge', 'f4', ('nEdge'))[:] = mpsw.latedge * r2d of.createVariable('lonEdge', 'f4', ('nEdge'))[:] = mpsw.lonedge", "= 1. # ----- end adj initial conditions ----- today = clock_end ulist,", "('nTime', 'nCell'))[:] = ulist of.createVariable('uy_ad', 'f4', ('nTime', 'nCell'))[:] = vlist of.createVariable('h_ad', 'f4', ('nTime',", "import timedelta import netCDF4 as nc import numpy as np sys.path.append('../src/') from module_sw_mpas", "r2d = 180. 
/ np.pi outname = nml.file_output.replace('.nc', '.adj.nc') with nc.Dataset(outname, 'w') as", "np.pi outname = nml.file_output.replace('.nc', '.adj.nc') with nc.Dataset(outname, 'w') as of: of.createDimension('nTime', hlist.shape[0]) of.createDimension('nCell',", "* r2d of.createVariable('lonEdge', 'f4', ('nEdge'))[:] = mpsw.lonedge * r2d of.createVariable('ux_ad', 'f4', ('nTime', 'nCell'))[:]", "read_dims, read_vars, \\ initial_conditions, clock_namelist def run_sw_adj(): # ----- nonlinear state trajectory -----", "of.createDimension('nEdge', mpsw.latedge.shape[0]) of.createVariable('latCell', 'f4', ('nCell'))[:] = mpsw.latcell * r2d of.createVariable('lonCell', 'f4', ('nCell'))[:] =", "ulist.append(mpsw.ureconstructzonal_ad[0].copy()) vlist.append(mpsw.ureconstructmeridional_ad[0].copy()) hlist.append(mpsw.h_ad[0, 0].copy()) unorm.append(mpsw.u_ad[0, 0].copy()) mpsw.sw_rk4_adj() itimestep -= 1 today -= timedelta(seconds=int(mpsw.config_dt))", "* r2d of.createVariable('lonCell', 'f4', ('nCell'))[:] = mpsw.loncell * r2d of.createVariable('latEdge', 'f4', ('nEdge'))[:] =", "- clock_start).total_seconds() % nml.output_interval == 0: ulist.append(mpsw.ureconstructzonal_ad[0].copy()) vlist.append(mpsw.ureconstructmeridional_ad[0].copy()) hlist.append(mpsw.h_ad[0, 0].copy()) unorm.append(mpsw.u_ad[0, 0].copy()) mpsw.sw_rk4_adj()", "= np.array(unorm) r2d = 180. / np.pi outname = nml.file_output.replace('.nc', '.adj.nc') with nc.Dataset(outname,", "mpsw.loncell * r2d of.createVariable('latEdge', 'f4', ('nEdge'))[:] = mpsw.latedge * r2d of.createVariable('lonEdge', 'f4', ('nEdge'))[:]", "end adj initial conditions ----- today = clock_end ulist, vlist, hlist = [],", "clock_end = clock_namelist(nml) # ----- adj initial conditions ----- mpsw.u_ad[0, 0, 0] =", "= np.array(hlist) unorm = np.array(unorm) r2d = 180. 
/ np.pi outname = nml.file_output.replace('.nc',", "of.createVariable('lonCell', 'f4', ('nCell'))[:] = mpsw.loncell * r2d of.createVariable('latEdge', 'f4', ('nEdge'))[:] = mpsw.latedge *", "= hlist of.createVariable('u_ad', 'f4', ('nTime', 'nEdge'))[:] = unorm if __name__ == '__main__': run_sw_adj()", "0].copy()) unorm.append(mpsw.u_ad[0, 0].copy()) mpsw.sw_rk4_adj() itimestep -= 1 today -= timedelta(seconds=int(mpsw.config_dt)) ulist, vlist =", "mpsw.latedge * r2d of.createVariable('lonEdge', 'f4', ('nEdge'))[:] = mpsw.lonedge * r2d of.createVariable('ux_ad', 'f4', ('nTime',", "nonlinear state trajectory ----- fwdname = 'x1.10242.state.nc' fwdfile = nc.Dataset(fwdname, 'r') # -----", "as of: of.createDimension('nTime', hlist.shape[0]) of.createDimension('nCell', mpsw.latcell.shape[0]) of.createDimension('nEdge', mpsw.latedge.shape[0]) of.createVariable('latCell', 'f4', ('nCell'))[:] = mpsw.latcell", "'f4', ('nEdge'))[:] = mpsw.lonedge * r2d of.createVariable('ux_ad', 'f4', ('nTime', 'nCell'))[:] = ulist of.createVariable('uy_ad',", "----- mpsw.u_ad[0, 0, 0] = 1. mpsw.h_ad[0, 0, 0] = 1. 
# -----", "(today - clock_start).total_seconds() % nml.output_interval == 0: ulist.append(mpsw.ureconstructzonal_ad[0].copy()) vlist.append(mpsw.ureconstructmeridional_ad[0].copy()) hlist.append(mpsw.h_ad[0, 0].copy()) unorm.append(mpsw.u_ad[0, 0].copy())", "mpsw.h_ad.sum()) mpsw.u[0, 0] = fwdfile['u'][itimestep] mpsw.h[0, 0] = fwdfile['h'][itimestep] if (today - clock_start).total_seconds()", "clock_start, clock_end = clock_namelist(nml) # ----- adj initial conditions ----- mpsw.u_ad[0, 0, 0]", "import numpy as np sys.path.append('../src/') from module_sw_mpas import mpas_sw_module as mpsw from mpas_namelist", "sys from datetime import timedelta import netCDF4 as nc import numpy as np", "import namelist from mpas_sw_driver import read_configs, read_dims, read_vars, \\ initial_conditions, clock_namelist def run_sw_adj():", "r2d of.createVariable('lonEdge', 'f4', ('nEdge'))[:] = mpsw.lonedge * r2d of.createVariable('ux_ad', 'f4', ('nTime', 'nCell'))[:] =", "0].copy()) mpsw.sw_rk4_adj() itimestep -= 1 today -= timedelta(seconds=int(mpsw.config_dt)) ulist, vlist = np.array(ulist), np.array(vlist)", "0] = 1. # ----- end adj initial conditions ----- today = clock_end", "mpsw.h[0, 0] = fwdfile['h'][itimestep] if (today - clock_start).total_seconds() % nml.output_interval == 0: ulist.append(mpsw.ureconstructzonal_ad[0].copy())" ]
[ "\"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\": 100 } ] }, \"userData\": \"RXhhbXBsZSBVc2VyRGF0YQ==\", \"osProfile\": { \"computerName\":", "props.vmId, \"availabilitySet\": { \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/availabilitySets/my-AvailabilitySet\" }, \"proximityPlacementGroup\": { \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/proximityPlacementGroups/{props.ppg}\" }, \"hardwareProfile\": {", "\"computerName\": \"myVM\", \"adminUsername\": \"admin\", \"windowsConfiguration\": { \"provisionVMAgent\": True, \"enableAutomaticUpdates\": False }, \"secrets\": []", "\"createOption\": \"FromImage\", \"caching\": \"ReadWrite\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\": 30", "\"storageAccountType\": \"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\": 100 } ] }, \"userData\": \"RXhhbXBsZSBVc2VyRGF0YQ==\", \"osProfile\":", "] }, \"diagnosticsProfile\": { \"bootDiagnostics\": { \"enabled\": True, \"storageUri\": f\"http://{props.name}.blob.core.windows.net\" } }, \"extensionsTimeBudget\":", "def save(self, *args, **kwargs): self.rid = '/subscriptions/%s/resourceGroups/%s/providers/%s/%s' % ( self.subscription, self.resourceGroup, self.type, self.name)", "\"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/proximityPlacementGroups/{props.ppg}\" }, \"hardwareProfile\": { \"vmSize\": \"Standard_DS3_v2\" }, \"storageProfile\": { \"imageReference\": { \"publisher\":", "save(self, *args, **kwargs): self.rid = '/subscriptions/%s/resourceGroups/%s/providers/%s/%s' % ( 
self.subscription, self.resourceGroup, self.type, self.name) super().save(args,", "\"bootDiagnostics\": { \"enabled\": True, \"storageUri\": f\"http://{props.name}.blob.core.windows.net\" } }, \"extensionsTimeBudget\": \"PT50M\", \"provisioningState\": \"Succeeded\" })", "30 }, { \"lun\": 1, \"name\": \"myDataDisk1\", \"createOption\": \"Attach\", \"caching\": \"ReadWrite\", \"managedDisk\": {", "\"osType\": \"Windows\", \"name\": \"myOsDisk\", \"createOption\": \"FromImage\", \"caching\": \"ReadWrite\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\", \"id\":", "}, \"networkProfile\": { \"networkInterfaces\": [ { \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Network/networkInterfaces/{props.nic}\" } ] }, \"diagnosticsProfile\": {", "\"version\": \"latest\" }, \"osDisk\": { \"osType\": \"Windows\", \"name\": \"myOsDisk\", \"createOption\": \"FromImage\", \"caching\": \"ReadWrite\",", "\"proximityPlacementGroup\": { \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/proximityPlacementGroups/{props.ppg}\" }, \"hardwareProfile\": { \"vmSize\": \"Standard_DS3_v2\" }, \"storageProfile\": { \"imageReference\":", "{ \"osType\": \"Windows\", \"name\": \"myOsDisk\", \"createOption\": \"FromImage\", \"caching\": \"ReadWrite\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\",", "flask_mongoengine import MongoEngine class Properties: name = \"example\" nic = \"example-nic\" disk =", "db from flask import current_app as app from flask_mongoengine import MongoEngine class Properties:", "= \"example-resource-group\" availabilitySet = \"example-availability-set\" ppg = \"example-proximity-placement-group\" props = Properties() store =", "\"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\": 30 }, \"dataDisks\": [ { \"lun\": 0, \"name\": \"myDataDisk0\",", "nic = \"example-nic\" disk = 
\"example-disk\" vmId = str(uuid.uuid4()) subId = str(uuid.uuid4()) rgroup", "\"name\": \"myDataDisk1\", \"createOption\": \"Attach\", \"caching\": \"ReadWrite\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" },", "True, \"enableAutomaticUpdates\": False }, \"secrets\": [] }, \"networkProfile\": { \"networkInterfaces\": [ { \"id\":", "\"managedDisk\": { \"storageAccountType\": \"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\": 100 } ] }, \"userData\":", "\"Succeeded\" }) meta = {'collection': 'virtualmachines'} def __repr__(self): return \"VirtualMachine(%s)\" % self.rid def", "app from flask_mongoengine import MongoEngine class Properties: name = \"example\" nic = \"example-nic\"", "ppg = \"example-proximity-placement-group\" props = Properties() store = MongoEngine(app._get_current_object()) class VirtualMachine(db.Document): tags =", "\"Attach\", \"caching\": \"ReadWrite\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\": 100 }", "}, \"userData\": \"RXhhbXBsZSBVc2VyRGF0YQ==\", \"osProfile\": { \"computerName\": \"myVM\", \"adminUsername\": \"admin\", \"windowsConfiguration\": { \"provisionVMAgent\": True,", "availabilitySet = \"example-availability-set\" ppg = \"example-proximity-placement-group\" props = Properties() store = MongoEngine(app._get_current_object()) class", "] }, \"userData\": \"RXhhbXBsZSBVc2VyRGF0YQ==\", \"osProfile\": { \"computerName\": \"myVM\", \"adminUsername\": \"admin\", \"windowsConfiguration\": { \"provisionVMAgent\":", "\"Standard_DS3_v2\" }, \"storageProfile\": { \"imageReference\": { \"publisher\": \"MicrosoftWindowsServer\", \"offer\": \"WindowsServer\", 
\"sku\": \"2016-Datacenter\", \"version\":", "\"storageAccountType\": \"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\": 30 }, { \"lun\": 1, \"name\": \"myDataDisk1\",", "import mongoengine as db from flask import current_app as app from flask_mongoengine import", "db.StringField(required=True) subscription = db.StringField(required=True) resourceGroup = db.StringField(required=True) rid = db.StringField(required=True, unique=True) provisioningState =", "}, \"storageProfile\": { \"imageReference\": { \"publisher\": \"MicrosoftWindowsServer\", \"offer\": \"WindowsServer\", \"sku\": \"2016-Datacenter\", \"version\": \"latest\"", "100 } ] }, \"userData\": \"RXhhbXBsZSBVc2VyRGF0YQ==\", \"osProfile\": { \"computerName\": \"myVM\", \"adminUsername\": \"admin\", \"windowsConfiguration\":", "f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\": 30 }, { \"lun\": 1, \"name\": \"myDataDisk1\", \"createOption\": \"Attach\", \"caching\":", "\"2016-Datacenter\", \"version\": \"latest\" }, \"osDisk\": { \"osType\": \"Windows\", \"name\": \"myOsDisk\", \"createOption\": \"FromImage\", \"caching\":", "\"myVM\", \"adminUsername\": \"admin\", \"windowsConfiguration\": { \"provisionVMAgent\": True, \"enableAutomaticUpdates\": False }, \"secrets\": [] },", "= db.StringField(required=True) location = db.StringField(required=True) subscription = db.StringField(required=True) resourceGroup = db.StringField(required=True) rid =", "}, \"proximityPlacementGroup\": { \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/proximityPlacementGroups/{props.ppg}\" }, \"hardwareProfile\": { \"vmSize\": \"Standard_DS3_v2\" }, \"storageProfile\": {", "[ { \"id\": 
f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Network/networkInterfaces/{props.nic}\" } ] }, \"diagnosticsProfile\": { \"bootDiagnostics\": { \"enabled\": True,", "meta = {'collection': 'virtualmachines'} def __repr__(self): return \"VirtualMachine(%s)\" % self.rid def save(self, *args,", "'virtualmachines'} def __repr__(self): return \"VirtualMachine(%s)\" % self.rid def save(self, *args, **kwargs): self.rid =", "\"ReadWrite\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\": 30 }, \"dataDisks\": [", "\"example-disk\" vmId = str(uuid.uuid4()) subId = str(uuid.uuid4()) rgroup = \"example-resource-group\" availabilitySet = \"example-availability-set\"", "\"lun\": 1, \"name\": \"myDataDisk1\", \"createOption\": \"Attach\", \"caching\": \"ReadWrite\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\", \"id\":", "[] }, \"networkProfile\": { \"networkInterfaces\": [ { \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Network/networkInterfaces/{props.nic}\" } ] }, \"diagnosticsProfile\":", "rid = db.StringField(required=True, unique=True) provisioningState = db.StringField(default='Succeeded') type = db.StringField(default='Microsoft.Compute/virtualMachines') properties = db.DictField(default={", "}, \"secrets\": [] }, \"networkProfile\": { \"networkInterfaces\": [ { \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Network/networkInterfaces/{props.nic}\" } ]", "{ \"vmSize\": \"Standard_DS3_v2\" }, \"storageProfile\": { \"imageReference\": { \"publisher\": \"MicrosoftWindowsServer\", \"offer\": \"WindowsServer\", \"sku\":", "\"myDataDisk1\", \"createOption\": \"Attach\", \"caching\": \"ReadWrite\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\", \"id\": 
f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\":", "\"caching\": \"ReadWrite\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\": 100 } ]", "unique=True) provisioningState = db.StringField(default='Succeeded') type = db.StringField(default='Microsoft.Compute/virtualMachines') properties = db.DictField(default={ \"vmId\": props.vmId, \"availabilitySet\":", "\"imageReference\": { \"publisher\": \"MicrosoftWindowsServer\", \"offer\": \"WindowsServer\", \"sku\": \"2016-Datacenter\", \"version\": \"latest\" }, \"osDisk\": {", "} ] }, \"userData\": \"RXhhbXBsZSBVc2VyRGF0YQ==\", \"osProfile\": { \"computerName\": \"myVM\", \"adminUsername\": \"admin\", \"windowsConfiguration\": {", "\"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\": 30 }, \"dataDisks\": [ { \"lun\": 0, \"name\":", "str(uuid.uuid4()) subId = str(uuid.uuid4()) rgroup = \"example-resource-group\" availabilitySet = \"example-availability-set\" ppg = \"example-proximity-placement-group\"", "\"caching\": \"ReadWrite\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\": 30 }, {", "\"dataDisks\": [ { \"lun\": 0, \"name\": \"myDataDisk0\", \"createOption\": \"Empty\", \"caching\": \"ReadWrite\", \"managedDisk\": {", "1, \"name\": \"myDataDisk1\", \"createOption\": \"Attach\", \"caching\": \"ReadWrite\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\"", "\"networkProfile\": { \"networkInterfaces\": [ { \"id\": 
f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Network/networkInterfaces/{props.nic}\" } ] }, \"diagnosticsProfile\": { \"bootDiagnostics\":", "\"myDataDisk0\", \"createOption\": \"Empty\", \"caching\": \"ReadWrite\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\":", "\"hardwareProfile\": { \"vmSize\": \"Standard_DS3_v2\" }, \"storageProfile\": { \"imageReference\": { \"publisher\": \"MicrosoftWindowsServer\", \"offer\": \"WindowsServer\",", "False }, \"secrets\": [] }, \"networkProfile\": { \"networkInterfaces\": [ { \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Network/networkInterfaces/{props.nic}\" }", "\"enabled\": True, \"storageUri\": f\"http://{props.name}.blob.core.windows.net\" } }, \"extensionsTimeBudget\": \"PT50M\", \"provisioningState\": \"Succeeded\" }) meta =", "return \"VirtualMachine(%s)\" % self.rid def save(self, *args, **kwargs): self.rid = '/subscriptions/%s/resourceGroups/%s/providers/%s/%s' % (", "= db.StringField(required=True) rid = db.StringField(required=True, unique=True) provisioningState = db.StringField(default='Succeeded') type = db.StringField(default='Microsoft.Compute/virtualMachines') properties", "\"FromImage\", \"caching\": \"ReadWrite\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\": 30 },", "% self.rid def save(self, *args, **kwargs): self.rid = '/subscriptions/%s/resourceGroups/%s/providers/%s/%s' % ( self.subscription, self.resourceGroup,", "{ \"lun\": 1, \"name\": \"myDataDisk1\", \"createOption\": \"Attach\", \"caching\": \"ReadWrite\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\",", "\"windowsConfiguration\": { \"provisionVMAgent\": 
True, \"enableAutomaticUpdates\": False }, \"secrets\": [] }, \"networkProfile\": { \"networkInterfaces\":", "\"diskSizeGB\": 30 }, { \"lun\": 1, \"name\": \"myDataDisk1\", \"createOption\": \"Attach\", \"caching\": \"ReadWrite\", \"managedDisk\":", "\"Windows\", \"name\": \"myOsDisk\", \"createOption\": \"FromImage\", \"caching\": \"ReadWrite\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\"", "subId = str(uuid.uuid4()) rgroup = \"example-resource-group\" availabilitySet = \"example-availability-set\" ppg = \"example-proximity-placement-group\" props", "\"provisionVMAgent\": True, \"enableAutomaticUpdates\": False }, \"secrets\": [] }, \"networkProfile\": { \"networkInterfaces\": [ {", "{ \"enabled\": True, \"storageUri\": f\"http://{props.name}.blob.core.windows.net\" } }, \"extensionsTimeBudget\": \"PT50M\", \"provisioningState\": \"Succeeded\" }) meta", "\"sku\": \"2016-Datacenter\", \"version\": \"latest\" }, \"osDisk\": { \"osType\": \"Windows\", \"name\": \"myOsDisk\", \"createOption\": \"FromImage\",", "[ { \"lun\": 0, \"name\": \"myDataDisk0\", \"createOption\": \"Empty\", \"caching\": \"ReadWrite\", \"managedDisk\": { \"storageAccountType\":", "\"storageProfile\": { \"imageReference\": { \"publisher\": \"MicrosoftWindowsServer\", \"offer\": \"WindowsServer\", \"sku\": \"2016-Datacenter\", \"version\": \"latest\" },", "\"example-availability-set\" ppg = \"example-proximity-placement-group\" props = Properties() store = MongoEngine(app._get_current_object()) class VirtualMachine(db.Document): tags", "\"PT50M\", \"provisioningState\": \"Succeeded\" }) meta = {'collection': 'virtualmachines'} def __repr__(self): return \"VirtualMachine(%s)\" %", "class Properties: name = \"example\" nic = \"example-nic\" disk = \"example-disk\" vmId =", 
"f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/availabilitySets/my-AvailabilitySet\" }, \"proximityPlacementGroup\": { \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/proximityPlacementGroups/{props.ppg}\" }, \"hardwareProfile\": { \"vmSize\": \"Standard_DS3_v2\" }, \"storageProfile\":", "\"MicrosoftWindowsServer\", \"offer\": \"WindowsServer\", \"sku\": \"2016-Datacenter\", \"version\": \"latest\" }, \"osDisk\": { \"osType\": \"Windows\", \"name\":", "\"VirtualMachine(%s)\" % self.rid def save(self, *args, **kwargs): self.rid = '/subscriptions/%s/resourceGroups/%s/providers/%s/%s' % ( self.subscription,", "f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\": 100 } ] }, \"userData\": \"RXhhbXBsZSBVc2VyRGF0YQ==\", \"osProfile\": { \"computerName\": \"myVM\",", "\"example-nic\" disk = \"example-disk\" vmId = str(uuid.uuid4()) subId = str(uuid.uuid4()) rgroup = \"example-resource-group\"", "\"vmSize\": \"Standard_DS3_v2\" }, \"storageProfile\": { \"imageReference\": { \"publisher\": \"MicrosoftWindowsServer\", \"offer\": \"WindowsServer\", \"sku\": \"2016-Datacenter\",", "{ \"computerName\": \"myVM\", \"adminUsername\": \"admin\", \"windowsConfiguration\": { \"provisionVMAgent\": True, \"enableAutomaticUpdates\": False }, \"secrets\":", "30 }, \"dataDisks\": [ { \"lun\": 0, \"name\": \"myDataDisk0\", \"createOption\": \"Empty\", \"caching\": \"ReadWrite\",", "\"diskSizeGB\": 100 } ] }, \"userData\": \"RXhhbXBsZSBVc2VyRGF0YQ==\", \"osProfile\": { \"computerName\": \"myVM\", \"adminUsername\": \"admin\",", "f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Network/networkInterfaces/{props.nic}\" } ] }, \"diagnosticsProfile\": { \"bootDiagnostics\": { \"enabled\": True, \"storageUri\": f\"http://{props.name}.blob.core.windows.net\" }", "}, { \"lun\": 1, \"name\": 
\"myDataDisk1\", \"createOption\": \"Attach\", \"caching\": \"ReadWrite\", \"managedDisk\": { \"storageAccountType\":", "\"diagnosticsProfile\": { \"bootDiagnostics\": { \"enabled\": True, \"storageUri\": f\"http://{props.name}.blob.core.windows.net\" } }, \"extensionsTimeBudget\": \"PT50M\", \"provisioningState\":", "current_app as app from flask_mongoengine import MongoEngine class Properties: name = \"example\" nic", "type = db.StringField(default='Microsoft.Compute/virtualMachines') properties = db.DictField(default={ \"vmId\": props.vmId, \"availabilitySet\": { \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/availabilitySets/my-AvailabilitySet\" },", "{ \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/availabilitySets/my-AvailabilitySet\" }, \"proximityPlacementGroup\": { \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/proximityPlacementGroups/{props.ppg}\" }, \"hardwareProfile\": { \"vmSize\": \"Standard_DS3_v2\"", "mongoengine as db from flask import current_app as app from flask_mongoengine import MongoEngine", "= \"example-availability-set\" ppg = \"example-proximity-placement-group\" props = Properties() store = MongoEngine(app._get_current_object()) class VirtualMachine(db.Document):", "\"caching\": \"ReadWrite\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\": 30 }, \"dataDisks\":", "}) meta = {'collection': 'virtualmachines'} def __repr__(self): return \"VirtualMachine(%s)\" % self.rid def save(self,", "\"name\": \"myDataDisk0\", \"createOption\": \"Empty\", \"caching\": \"ReadWrite\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\", \"id\": 
f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" },", "MongoEngine class Properties: name = \"example\" nic = \"example-nic\" disk = \"example-disk\" vmId", "\"Empty\", \"caching\": \"ReadWrite\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\": 30 },", "= db.StringField(required=True) resourceGroup = db.StringField(required=True) rid = db.StringField(required=True, unique=True) provisioningState = db.StringField(default='Succeeded') type", "VirtualMachine(db.Document): tags = db.DictField() name = db.StringField(required=True) location = db.StringField(required=True) subscription = db.StringField(required=True)", "subscription = db.StringField(required=True) resourceGroup = db.StringField(required=True) rid = db.StringField(required=True, unique=True) provisioningState = db.StringField(default='Succeeded')", "\"networkInterfaces\": [ { \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Network/networkInterfaces/{props.nic}\" } ] }, \"diagnosticsProfile\": { \"bootDiagnostics\": { \"enabled\":", "= \"example-disk\" vmId = str(uuid.uuid4()) subId = str(uuid.uuid4()) rgroup = \"example-resource-group\" availabilitySet =", "\"myOsDisk\", \"createOption\": \"FromImage\", \"caching\": \"ReadWrite\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\":", "resourceGroup = db.StringField(required=True) rid = db.StringField(required=True, unique=True) provisioningState = db.StringField(default='Succeeded') type = db.StringField(default='Microsoft.Compute/virtualMachines')", "import uuid import mongoengine as db from flask import current_app as app from", "= \"example\" nic = 
\"example-nic\" disk = \"example-disk\" vmId = str(uuid.uuid4()) subId =", "\"provisioningState\": \"Succeeded\" }) meta = {'collection': 'virtualmachines'} def __repr__(self): return \"VirtualMachine(%s)\" % self.rid", "uuid import mongoengine as db from flask import current_app as app from flask_mongoengine", "db.DictField() name = db.StringField(required=True) location = db.StringField(required=True) subscription = db.StringField(required=True) resourceGroup = db.StringField(required=True)", "props = Properties() store = MongoEngine(app._get_current_object()) class VirtualMachine(db.Document): tags = db.DictField() name =", "= db.StringField(required=True, unique=True) provisioningState = db.StringField(default='Succeeded') type = db.StringField(default='Microsoft.Compute/virtualMachines') properties = db.DictField(default={ \"vmId\":", "f\"http://{props.name}.blob.core.windows.net\" } }, \"extensionsTimeBudget\": \"PT50M\", \"provisioningState\": \"Succeeded\" }) meta = {'collection': 'virtualmachines'} def", "properties = db.DictField(default={ \"vmId\": props.vmId, \"availabilitySet\": { \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/availabilitySets/my-AvailabilitySet\" }, \"proximityPlacementGroup\": { \"id\":", "{ \"networkInterfaces\": [ { \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Network/networkInterfaces/{props.nic}\" } ] }, \"diagnosticsProfile\": { \"bootDiagnostics\": {", "= \"example-nic\" disk = \"example-disk\" vmId = str(uuid.uuid4()) subId = str(uuid.uuid4()) rgroup =", "name = db.StringField(required=True) location = db.StringField(required=True) subscription = db.StringField(required=True) resourceGroup = db.StringField(required=True) rid", "\"example-resource-group\" availabilitySet = \"example-availability-set\" ppg = \"example-proximity-placement-group\" props = Properties() store = MongoEngine(app._get_current_object())", "{ \"id\": 
f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/proximityPlacementGroups/{props.ppg}\" }, \"hardwareProfile\": { \"vmSize\": \"Standard_DS3_v2\" }, \"storageProfile\": { \"imageReference\": {", "\"offer\": \"WindowsServer\", \"sku\": \"2016-Datacenter\", \"version\": \"latest\" }, \"osDisk\": { \"osType\": \"Windows\", \"name\": \"myOsDisk\",", "\"example\" nic = \"example-nic\" disk = \"example-disk\" vmId = str(uuid.uuid4()) subId = str(uuid.uuid4())", "}, \"diskSizeGB\": 30 }, { \"lun\": 1, \"name\": \"myDataDisk1\", \"createOption\": \"Attach\", \"caching\": \"ReadWrite\",", "}, \"diskSizeGB\": 100 } ] }, \"userData\": \"RXhhbXBsZSBVc2VyRGF0YQ==\", \"osProfile\": { \"computerName\": \"myVM\", \"adminUsername\":", "= MongoEngine(app._get_current_object()) class VirtualMachine(db.Document): tags = db.DictField() name = db.StringField(required=True) location = db.StringField(required=True)", "import current_app as app from flask_mongoengine import MongoEngine class Properties: name = \"example\"", "\"extensionsTimeBudget\": \"PT50M\", \"provisioningState\": \"Succeeded\" }) meta = {'collection': 'virtualmachines'} def __repr__(self): return \"VirtualMachine(%s)\"", "tags = db.DictField() name = db.StringField(required=True) location = db.StringField(required=True) subscription = db.StringField(required=True) resourceGroup", "name = \"example\" nic = \"example-nic\" disk = \"example-disk\" vmId = str(uuid.uuid4()) subId", "class VirtualMachine(db.Document): tags = db.DictField() name = db.StringField(required=True) location = db.StringField(required=True) subscription =", "\"createOption\": \"Attach\", \"caching\": \"ReadWrite\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\": 100", "as app from flask_mongoengine import MongoEngine class Properties: name = \"example\" nic =", 
"{'collection': 'virtualmachines'} def __repr__(self): return \"VirtualMachine(%s)\" % self.rid def save(self, *args, **kwargs): self.rid", "provisioningState = db.StringField(default='Succeeded') type = db.StringField(default='Microsoft.Compute/virtualMachines') properties = db.DictField(default={ \"vmId\": props.vmId, \"availabilitySet\": {", "= db.DictField() name = db.StringField(required=True) location = db.StringField(required=True) subscription = db.StringField(required=True) resourceGroup =", "\"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\": 30 }, { \"lun\": 1, \"name\": \"myDataDisk1\", \"createOption\": \"Attach\",", "\"publisher\": \"MicrosoftWindowsServer\", \"offer\": \"WindowsServer\", \"sku\": \"2016-Datacenter\", \"version\": \"latest\" }, \"osDisk\": { \"osType\": \"Windows\",", "\"WindowsServer\", \"sku\": \"2016-Datacenter\", \"version\": \"latest\" }, \"osDisk\": { \"osType\": \"Windows\", \"name\": \"myOsDisk\", \"createOption\":", "\"RXhhbXBsZSBVc2VyRGF0YQ==\", \"osProfile\": { \"computerName\": \"myVM\", \"adminUsername\": \"admin\", \"windowsConfiguration\": { \"provisionVMAgent\": True, \"enableAutomaticUpdates\": False", "0, \"name\": \"myDataDisk0\", \"createOption\": \"Empty\", \"caching\": \"ReadWrite\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\"", "as db from flask import current_app as app from flask_mongoengine import MongoEngine class", "{ \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Network/networkInterfaces/{props.nic}\" } ] }, \"diagnosticsProfile\": { \"bootDiagnostics\": { \"enabled\": True, \"storageUri\":", "= db.StringField(required=True) subscription = db.StringField(required=True) resourceGroup = db.StringField(required=True) rid = 
db.StringField(required=True, unique=True) provisioningState", "MongoEngine(app._get_current_object()) class VirtualMachine(db.Document): tags = db.DictField() name = db.StringField(required=True) location = db.StringField(required=True) subscription", "}, \"hardwareProfile\": { \"vmSize\": \"Standard_DS3_v2\" }, \"storageProfile\": { \"imageReference\": { \"publisher\": \"MicrosoftWindowsServer\", \"offer\":", "\"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\": 100 } ] }, \"userData\": \"RXhhbXBsZSBVc2VyRGF0YQ==\", \"osProfile\": {", "\"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/availabilitySets/my-AvailabilitySet\" }, \"proximityPlacementGroup\": { \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/proximityPlacementGroups/{props.ppg}\" }, \"hardwareProfile\": { \"vmSize\": \"Standard_DS3_v2\" },", "\"example-proximity-placement-group\" props = Properties() store = MongoEngine(app._get_current_object()) class VirtualMachine(db.Document): tags = db.DictField() name", "\"managedDisk\": { \"storageAccountType\": \"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\": 30 }, \"dataDisks\": [ {", "\"diskSizeGB\": 30 }, \"dataDisks\": [ { \"lun\": 0, \"name\": \"myDataDisk0\", \"createOption\": \"Empty\", \"caching\":", "Properties() store = MongoEngine(app._get_current_object()) class VirtualMachine(db.Document): tags = db.DictField() name = db.StringField(required=True) location", "\"availabilitySet\": { \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/availabilitySets/my-AvailabilitySet\" }, \"proximityPlacementGroup\": { \"id\": 
f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/proximityPlacementGroups/{props.ppg}\" }, \"hardwareProfile\": { \"vmSize\":", "= \"example-proximity-placement-group\" props = Properties() store = MongoEngine(app._get_current_object()) class VirtualMachine(db.Document): tags = db.DictField()", "= db.DictField(default={ \"vmId\": props.vmId, \"availabilitySet\": { \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/availabilitySets/my-AvailabilitySet\" }, \"proximityPlacementGroup\": { \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/proximityPlacementGroups/{props.ppg}\"", "\"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\": 30 }, { \"lun\": 1, \"name\": \"myDataDisk1\", \"createOption\":", "def __repr__(self): return \"VirtualMachine(%s)\" % self.rid def save(self, *args, **kwargs): self.rid = '/subscriptions/%s/resourceGroups/%s/providers/%s/%s'", "\"vmId\": props.vmId, \"availabilitySet\": { \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/availabilitySets/my-AvailabilitySet\" }, \"proximityPlacementGroup\": { \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/proximityPlacementGroups/{props.ppg}\" }, \"hardwareProfile\":", "Properties: name = \"example\" nic = \"example-nic\" disk = \"example-disk\" vmId = str(uuid.uuid4())", "{ \"publisher\": \"MicrosoftWindowsServer\", \"offer\": \"WindowsServer\", \"sku\": \"2016-Datacenter\", \"version\": \"latest\" }, \"osDisk\": { \"osType\":", "\"secrets\": [] }, \"networkProfile\": { \"networkInterfaces\": [ { \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Network/networkInterfaces/{props.nic}\" } ] },", "= 
db.StringField(default='Microsoft.Compute/virtualMachines') properties = db.DictField(default={ \"vmId\": props.vmId, \"availabilitySet\": { \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/availabilitySets/my-AvailabilitySet\" }, \"proximityPlacementGroup\":", "\"managedDisk\": { \"storageAccountType\": \"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\": 30 }, { \"lun\": 1,", "} ] }, \"diagnosticsProfile\": { \"bootDiagnostics\": { \"enabled\": True, \"storageUri\": f\"http://{props.name}.blob.core.windows.net\" } },", "import MongoEngine class Properties: name = \"example\" nic = \"example-nic\" disk = \"example-disk\"", "}, \"diagnosticsProfile\": { \"bootDiagnostics\": { \"enabled\": True, \"storageUri\": f\"http://{props.name}.blob.core.windows.net\" } }, \"extensionsTimeBudget\": \"PT50M\",", "str(uuid.uuid4()) rgroup = \"example-resource-group\" availabilitySet = \"example-availability-set\" ppg = \"example-proximity-placement-group\" props = Properties()", "\"createOption\": \"Empty\", \"caching\": \"ReadWrite\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\": 30", "}, \"diskSizeGB\": 30 }, \"dataDisks\": [ { \"lun\": 0, \"name\": \"myDataDisk0\", \"createOption\": \"Empty\",", "} }, \"extensionsTimeBudget\": \"PT50M\", \"provisioningState\": \"Succeeded\" }) meta = {'collection': 'virtualmachines'} def __repr__(self):", "store = MongoEngine(app._get_current_object()) class VirtualMachine(db.Document): tags = db.DictField() name = db.StringField(required=True) location =", "from flask_mongoengine import MongoEngine class Properties: name = \"example\" nic = \"example-nic\" disk", "__repr__(self): return \"VirtualMachine(%s)\" % self.rid def save(self, *args, 
**kwargs): self.rid = '/subscriptions/%s/resourceGroups/%s/providers/%s/%s' %", "db.StringField(required=True) location = db.StringField(required=True) subscription = db.StringField(required=True) resourceGroup = db.StringField(required=True) rid = db.StringField(required=True,", "f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/proximityPlacementGroups/{props.ppg}\" }, \"hardwareProfile\": { \"vmSize\": \"Standard_DS3_v2\" }, \"storageProfile\": { \"imageReference\": { \"publisher\": \"MicrosoftWindowsServer\",", "self.rid def save(self, *args, **kwargs): self.rid = '/subscriptions/%s/resourceGroups/%s/providers/%s/%s' % ( self.subscription, self.resourceGroup, self.type,", "db.StringField(required=True, unique=True) provisioningState = db.StringField(default='Succeeded') type = db.StringField(default='Microsoft.Compute/virtualMachines') properties = db.DictField(default={ \"vmId\": props.vmId,", "= Properties() store = MongoEngine(app._get_current_object()) class VirtualMachine(db.Document): tags = db.DictField() name = db.StringField(required=True)", "db.StringField(required=True) rid = db.StringField(required=True, unique=True) provisioningState = db.StringField(default='Succeeded') type = db.StringField(default='Microsoft.Compute/virtualMachines') properties =", "\"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Network/networkInterfaces/{props.nic}\" } ] }, \"diagnosticsProfile\": { \"bootDiagnostics\": { \"enabled\": True, \"storageUri\": f\"http://{props.name}.blob.core.windows.net\"", "{ \"storageAccountType\": \"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\": 100 } ] }, \"userData\": \"RXhhbXBsZSBVc2VyRGF0YQ==\",", "\"userData\": \"RXhhbXBsZSBVc2VyRGF0YQ==\", \"osProfile\": { \"computerName\": \"myVM\", \"adminUsername\": \"admin\", \"windowsConfiguration\": { 
\"provisionVMAgent\": True, \"enableAutomaticUpdates\":", "= str(uuid.uuid4()) subId = str(uuid.uuid4()) rgroup = \"example-resource-group\" availabilitySet = \"example-availability-set\" ppg =", "location = db.StringField(required=True) subscription = db.StringField(required=True) resourceGroup = db.StringField(required=True) rid = db.StringField(required=True, unique=True)", "\"latest\" }, \"osDisk\": { \"osType\": \"Windows\", \"name\": \"myOsDisk\", \"createOption\": \"FromImage\", \"caching\": \"ReadWrite\", \"managedDisk\":", "{ \"storageAccountType\": \"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\": 30 }, { \"lun\": 1, \"name\":", "db.StringField(required=True) resourceGroup = db.StringField(required=True) rid = db.StringField(required=True, unique=True) provisioningState = db.StringField(default='Succeeded') type =", "{ \"bootDiagnostics\": { \"enabled\": True, \"storageUri\": f\"http://{props.name}.blob.core.windows.net\" } }, \"extensionsTimeBudget\": \"PT50M\", \"provisioningState\": \"Succeeded\"", "f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\": 30 }, \"dataDisks\": [ { \"lun\": 0, \"name\": \"myDataDisk0\", \"createOption\":", "\"storageUri\": f\"http://{props.name}.blob.core.windows.net\" } }, \"extensionsTimeBudget\": \"PT50M\", \"provisioningState\": \"Succeeded\" }) meta = {'collection': 'virtualmachines'}", "}, \"extensionsTimeBudget\": \"PT50M\", \"provisioningState\": \"Succeeded\" }) meta = {'collection': 'virtualmachines'} def __repr__(self): return", "{ \"lun\": 0, \"name\": \"myDataDisk0\", \"createOption\": \"Empty\", \"caching\": \"ReadWrite\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\",", "True, \"storageUri\": f\"http://{props.name}.blob.core.windows.net\" } }, \"extensionsTimeBudget\": \"PT50M\", \"provisioningState\": \"Succeeded\" }) 
meta = {'collection':", "\"enableAutomaticUpdates\": False }, \"secrets\": [] }, \"networkProfile\": { \"networkInterfaces\": [ { \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Network/networkInterfaces/{props.nic}\"", "db.StringField(default='Succeeded') type = db.StringField(default='Microsoft.Compute/virtualMachines') properties = db.DictField(default={ \"vmId\": props.vmId, \"availabilitySet\": { \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/availabilitySets/my-AvailabilitySet\"", "\"lun\": 0, \"name\": \"myDataDisk0\", \"createOption\": \"Empty\", \"caching\": \"ReadWrite\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\", \"id\":", "{ \"provisionVMAgent\": True, \"enableAutomaticUpdates\": False }, \"secrets\": [] }, \"networkProfile\": { \"networkInterfaces\": [", "\"name\": \"myOsDisk\", \"createOption\": \"FromImage\", \"caching\": \"ReadWrite\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" },", "\"osProfile\": { \"computerName\": \"myVM\", \"adminUsername\": \"admin\", \"windowsConfiguration\": { \"provisionVMAgent\": True, \"enableAutomaticUpdates\": False },", "\"admin\", \"windowsConfiguration\": { \"provisionVMAgent\": True, \"enableAutomaticUpdates\": False }, \"secrets\": [] }, \"networkProfile\": {", "*args, **kwargs): self.rid = '/subscriptions/%s/resourceGroups/%s/providers/%s/%s' % ( self.subscription, self.resourceGroup, self.type, self.name) super().save(args, kwargs)", "vmId = str(uuid.uuid4()) subId = str(uuid.uuid4()) rgroup = \"example-resource-group\" availabilitySet = \"example-availability-set\" ppg", "= str(uuid.uuid4()) rgroup = \"example-resource-group\" availabilitySet = \"example-availability-set\" ppg = \"example-proximity-placement-group\" props =", "from flask import current_app as app from 
flask_mongoengine import MongoEngine class Properties: name", "{ \"storageAccountType\": \"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\": 30 }, \"dataDisks\": [ { \"lun\":", "flask import current_app as app from flask_mongoengine import MongoEngine class Properties: name =", "\"ReadWrite\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\": 100 } ] },", "}, \"osDisk\": { \"osType\": \"Windows\", \"name\": \"myOsDisk\", \"createOption\": \"FromImage\", \"caching\": \"ReadWrite\", \"managedDisk\": {", "\"adminUsername\": \"admin\", \"windowsConfiguration\": { \"provisionVMAgent\": True, \"enableAutomaticUpdates\": False }, \"secrets\": [] }, \"networkProfile\":", "= {'collection': 'virtualmachines'} def __repr__(self): return \"VirtualMachine(%s)\" % self.rid def save(self, *args, **kwargs):", "}, \"dataDisks\": [ { \"lun\": 0, \"name\": \"myDataDisk0\", \"createOption\": \"Empty\", \"caching\": \"ReadWrite\", \"managedDisk\":", "\"ReadWrite\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\": 30 }, { \"lun\":", "\"storageAccountType\": \"Premium_LRS\", \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/disks/{props.disk}\" }, \"diskSizeGB\": 30 }, \"dataDisks\": [ { \"lun\": 0,", "\"osDisk\": { \"osType\": \"Windows\", \"name\": \"myOsDisk\", \"createOption\": \"FromImage\", \"caching\": \"ReadWrite\", \"managedDisk\": { \"storageAccountType\":", "= db.StringField(default='Succeeded') type = db.StringField(default='Microsoft.Compute/virtualMachines') properties = db.DictField(default={ \"vmId\": props.vmId, 
\"availabilitySet\": { \"id\":", "db.StringField(default='Microsoft.Compute/virtualMachines') properties = db.DictField(default={ \"vmId\": props.vmId, \"availabilitySet\": { \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/availabilitySets/my-AvailabilitySet\" }, \"proximityPlacementGroup\": {", "{ \"imageReference\": { \"publisher\": \"MicrosoftWindowsServer\", \"offer\": \"WindowsServer\", \"sku\": \"2016-Datacenter\", \"version\": \"latest\" }, \"osDisk\":", "disk = \"example-disk\" vmId = str(uuid.uuid4()) subId = str(uuid.uuid4()) rgroup = \"example-resource-group\" availabilitySet", "rgroup = \"example-resource-group\" availabilitySet = \"example-availability-set\" ppg = \"example-proximity-placement-group\" props = Properties() store", "db.DictField(default={ \"vmId\": props.vmId, \"availabilitySet\": { \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/availabilitySets/my-AvailabilitySet\" }, \"proximityPlacementGroup\": { \"id\": f\"/subscriptions/{props.subId}/resourceGroups/{props.rgroup}/providers/Microsoft.Compute/proximityPlacementGroups/{props.ppg}\" }," ]
[ "# Generated by Django 3.0 on 2019-12-18 12:15 from django.db import migrations, models", "models class Migration(migrations.Migration): dependencies = [ ('graduated_site', '0006_auto_20191218_0927'), ] operations = [ migrations.AddField(", "[ ('graduated_site', '0006_auto_20191218_0927'), ] operations = [ migrations.AddField( model_name='user_internship_post', name='image', field=models.ImageField(null=True, upload_to='', verbose_name='Resim'),", "name='image', field=models.ImageField(null=True, upload_to='', verbose_name='Resim'), ), migrations.AlterField( model_name='user_internship_post', name='working_area', field=models.ManyToManyField(null=True, related_name='alan', to='graduated_site.working_area', verbose_name='Çalışma Alanları'),", "[ migrations.AddField( model_name='user_internship_post', name='image', field=models.ImageField(null=True, upload_to='', verbose_name='Resim'), ), migrations.AlterField( model_name='user_internship_post', name='working_area', field=models.ManyToManyField(null=True, related_name='alan',", "2019-12-18 12:15 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('graduated_site',", "dependencies = [ ('graduated_site', '0006_auto_20191218_0927'), ] operations = [ migrations.AddField( model_name='user_internship_post', name='image', field=models.ImageField(null=True,", "on 2019-12-18 12:15 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "3.0 on 2019-12-18 12:15 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "migrations, models class Migration(migrations.Migration): dependencies = [ ('graduated_site', '0006_auto_20191218_0927'), ] operations = [", "Django 3.0 on 2019-12-18 12:15 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "by Django 3.0 on 2019-12-18 12:15 from django.db import migrations, models class Migration(migrations.Migration):", 
"Generated by Django 3.0 on 2019-12-18 12:15 from django.db import migrations, models class", "field=models.ImageField(null=True, upload_to='', verbose_name='Resim'), ), migrations.AlterField( model_name='user_internship_post', name='working_area', field=models.ManyToManyField(null=True, related_name='alan', to='graduated_site.working_area', verbose_name='Çalışma Alanları'), ),", "= [ migrations.AddField( model_name='user_internship_post', name='image', field=models.ImageField(null=True, upload_to='', verbose_name='Resim'), ), migrations.AlterField( model_name='user_internship_post', name='working_area', field=models.ManyToManyField(null=True,", "migrations.AddField( model_name='user_internship_post', name='image', field=models.ImageField(null=True, upload_to='', verbose_name='Resim'), ), migrations.AlterField( model_name='user_internship_post', name='working_area', field=models.ManyToManyField(null=True, related_name='alan', to='graduated_site.working_area',", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('graduated_site', '0006_auto_20191218_0927'), ] operations =", "upload_to='', verbose_name='Resim'), ), migrations.AlterField( model_name='user_internship_post', name='working_area', field=models.ManyToManyField(null=True, related_name='alan', to='graduated_site.working_area', verbose_name='Çalışma Alanları'), ), ]", "12:15 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('graduated_site', '0006_auto_20191218_0927'),", "'0006_auto_20191218_0927'), ] operations = [ migrations.AddField( model_name='user_internship_post', name='image', field=models.ImageField(null=True, upload_to='', verbose_name='Resim'), ), migrations.AlterField(", "Migration(migrations.Migration): dependencies = [ ('graduated_site', '0006_auto_20191218_0927'), ] operations = [ migrations.AddField( model_name='user_internship_post', name='image',", "operations = [ migrations.AddField( 
model_name='user_internship_post', name='image', field=models.ImageField(null=True, upload_to='', verbose_name='Resim'), ), migrations.AlterField( model_name='user_internship_post', name='working_area',", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('graduated_site', '0006_auto_20191218_0927'), ]", "('graduated_site', '0006_auto_20191218_0927'), ] operations = [ migrations.AddField( model_name='user_internship_post', name='image', field=models.ImageField(null=True, upload_to='', verbose_name='Resim'), ),", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('graduated_site', '0006_auto_20191218_0927'), ] operations", "model_name='user_internship_post', name='image', field=models.ImageField(null=True, upload_to='', verbose_name='Resim'), ), migrations.AlterField( model_name='user_internship_post', name='working_area', field=models.ManyToManyField(null=True, related_name='alan', to='graduated_site.working_area', verbose_name='Çalışma", "<filename>graduated_site/migrations/0007_auto_20191218_1215.py # Generated by Django 3.0 on 2019-12-18 12:15 from django.db import migrations,", "class Migration(migrations.Migration): dependencies = [ ('graduated_site', '0006_auto_20191218_0927'), ] operations = [ migrations.AddField( model_name='user_internship_post',", "= [ ('graduated_site', '0006_auto_20191218_0927'), ] operations = [ migrations.AddField( model_name='user_internship_post', name='image', field=models.ImageField(null=True, upload_to='',", "] operations = [ migrations.AddField( model_name='user_internship_post', name='image', field=models.ImageField(null=True, upload_to='', verbose_name='Resim'), ), migrations.AlterField( model_name='user_internship_post'," ]
[ "for ids in emailBuilder.toId: toids.append(Strings.getFormattedEmail(builder=ids)) subject = emailBuilder.subject content = emailBuilder.content return Mail(from_email=fromId,", "= Strings.getFormattedEmail(builder=emailBuilder.fromId); toids = list() for ids in emailBuilder.toId: toids.append(Strings.getFormattedEmail(builder=ids)) subject = emailBuilder.subject", "Strings.getFormattedEmail(builder=emailBuilder.fromId); toids = list() for ids in emailBuilder.toId: toids.append(Strings.getFormattedEmail(builder=ids)) subject = emailBuilder.subject content", "import Strings class SendGridEmailHelper: def builderToMail(self,emailBuilder): fromId = Strings.getFormattedEmail(builder=emailBuilder.fromId); toids = list() for", "in emailBuilder.toId: toids.append(Strings.getFormattedEmail(builder=ids)) subject = emailBuilder.subject content = emailBuilder.content return Mail(from_email=fromId, to_emails=toids, subject=subject,", "builderToMail(self,emailBuilder): fromId = Strings.getFormattedEmail(builder=emailBuilder.fromId); toids = list() for ids in emailBuilder.toId: toids.append(Strings.getFormattedEmail(builder=ids)) subject", "fromId = Strings.getFormattedEmail(builder=emailBuilder.fromId); toids = list() for ids in emailBuilder.toId: toids.append(Strings.getFormattedEmail(builder=ids)) subject =", "class SendGridEmailHelper: def builderToMail(self,emailBuilder): fromId = Strings.getFormattedEmail(builder=emailBuilder.fromId); toids = list() for ids in", "from CommonCode.strings import Strings class SendGridEmailHelper: def builderToMail(self,emailBuilder): fromId = Strings.getFormattedEmail(builder=emailBuilder.fromId); toids =", "Mail from CommonCode.strings import Strings class SendGridEmailHelper: def builderToMail(self,emailBuilder): fromId = Strings.getFormattedEmail(builder=emailBuilder.fromId); toids", "emailBuilder.toId: toids.append(Strings.getFormattedEmail(builder=ids)) subject = emailBuilder.subject content = emailBuilder.content return 
Mail(from_email=fromId, to_emails=toids, subject=subject, html_content=content)", "from sendgrid.helpers.mail import Mail from CommonCode.strings import Strings class SendGridEmailHelper: def builderToMail(self,emailBuilder): fromId", "sendgrid.helpers.mail import Mail from CommonCode.strings import Strings class SendGridEmailHelper: def builderToMail(self,emailBuilder): fromId =", "= list() for ids in emailBuilder.toId: toids.append(Strings.getFormattedEmail(builder=ids)) subject = emailBuilder.subject content = emailBuilder.content", "list() for ids in emailBuilder.toId: toids.append(Strings.getFormattedEmail(builder=ids)) subject = emailBuilder.subject content = emailBuilder.content return", "import Mail from CommonCode.strings import Strings class SendGridEmailHelper: def builderToMail(self,emailBuilder): fromId = Strings.getFormattedEmail(builder=emailBuilder.fromId);", "CommonCode.strings import Strings class SendGridEmailHelper: def builderToMail(self,emailBuilder): fromId = Strings.getFormattedEmail(builder=emailBuilder.fromId); toids = list()", "ids in emailBuilder.toId: toids.append(Strings.getFormattedEmail(builder=ids)) subject = emailBuilder.subject content = emailBuilder.content return Mail(from_email=fromId, to_emails=toids,", "SendGridEmailHelper: def builderToMail(self,emailBuilder): fromId = Strings.getFormattedEmail(builder=emailBuilder.fromId); toids = list() for ids in emailBuilder.toId:", "toids = list() for ids in emailBuilder.toId: toids.append(Strings.getFormattedEmail(builder=ids)) subject = emailBuilder.subject content =", "def builderToMail(self,emailBuilder): fromId = Strings.getFormattedEmail(builder=emailBuilder.fromId); toids = list() for ids in emailBuilder.toId: toids.append(Strings.getFormattedEmail(builder=ids))", "Strings class SendGridEmailHelper: def builderToMail(self,emailBuilder): fromId = Strings.getFormattedEmail(builder=emailBuilder.fromId); toids = list() for ids" ]
[ "import assert_equal class RequestTest(unittest2.TestCase): def test_ping(self): assert_equal('\\xff\\xff\\x00\\x01\\x00\\x01\\xfd', request.Ping().bytes) def test_set_rgb(self): response = request.SetRGB(0,", "from sphero import request from nose.tools import assert_equal class RequestTest(unittest2.TestCase): def test_ping(self): assert_equal('\\xff\\xff\\x00\\x01\\x00\\x01\\xfd',", "sphero import request from nose.tools import assert_equal class RequestTest(unittest2.TestCase): def test_ping(self): assert_equal('\\xff\\xff\\x00\\x01\\x00\\x01\\xfd', request.Ping().bytes)", "unittest2 from sphero import request from nose.tools import assert_equal class RequestTest(unittest2.TestCase): def test_ping(self):", "nose.tools import assert_equal class RequestTest(unittest2.TestCase): def test_ping(self): assert_equal('\\xff\\xff\\x00\\x01\\x00\\x01\\xfd', request.Ping().bytes) def test_set_rgb(self): response =", "test_ping(self): assert_equal('\\xff\\xff\\x00\\x01\\x00\\x01\\xfd', request.Ping().bytes) def test_set_rgb(self): response = request.SetRGB(0, 0, 100, 200, 0) assert_equal('\\x00d\\xC8\\x00',", "assert_equal class RequestTest(unittest2.TestCase): def test_ping(self): assert_equal('\\xff\\xff\\x00\\x01\\x00\\x01\\xfd', request.Ping().bytes) def test_set_rgb(self): response = request.SetRGB(0, 0,", "RequestTest(unittest2.TestCase): def test_ping(self): assert_equal('\\xff\\xff\\x00\\x01\\x00\\x01\\xfd', request.Ping().bytes) def test_set_rgb(self): response = request.SetRGB(0, 0, 100, 200,", "assert_equal('\\xff\\xff\\x00\\x01\\x00\\x01\\xfd', request.Ping().bytes) def test_set_rgb(self): response = request.SetRGB(0, 0, 100, 200, 0) assert_equal('\\x00d\\xC8\\x00', response.packet_body())", "import request from nose.tools import assert_equal class RequestTest(unittest2.TestCase): def test_ping(self): assert_equal('\\xff\\xff\\x00\\x01\\x00\\x01\\xfd', request.Ping().bytes) def", "import unittest2 from sphero import request from nose.tools import assert_equal 
class RequestTest(unittest2.TestCase): def", "from nose.tools import assert_equal class RequestTest(unittest2.TestCase): def test_ping(self): assert_equal('\\xff\\xff\\x00\\x01\\x00\\x01\\xfd', request.Ping().bytes) def test_set_rgb(self): response", "request from nose.tools import assert_equal class RequestTest(unittest2.TestCase): def test_ping(self): assert_equal('\\xff\\xff\\x00\\x01\\x00\\x01\\xfd', request.Ping().bytes) def test_set_rgb(self):", "class RequestTest(unittest2.TestCase): def test_ping(self): assert_equal('\\xff\\xff\\x00\\x01\\x00\\x01\\xfd', request.Ping().bytes) def test_set_rgb(self): response = request.SetRGB(0, 0, 100,", "<reponame>yoshikiohshima/gato<gh_stars>0 import unittest2 from sphero import request from nose.tools import assert_equal class RequestTest(unittest2.TestCase):", "def test_ping(self): assert_equal('\\xff\\xff\\x00\\x01\\x00\\x01\\xfd', request.Ping().bytes) def test_set_rgb(self): response = request.SetRGB(0, 0, 100, 200, 0)" ]
[ "== 1: return 1 while a > 1: q = a // b", "def mul_inv(a, b): b0 = b x0, x1 = 0, 1 if b", "b a, b = b, a % b x0, x1 = x1 -", "= reduce(lambda a, b: a * b, n) for n_i, a_i in zip(n,", "functools import reduce import re # Usage: # n = [3, 5, 7]", "a_i * mul_inv(p, n_i) * p return sum % prod def mul_inv(a, b):", "prod = reduce(lambda a, b: a * b, n) for n_i, a_i in", "a % b x0, x1 = x1 - q * x0, x0 if", "n) for n_i, a_i in zip(n, a): p = prod // n_i sum", "https://rosettacode.org/wiki/Chinese_remainder_theorem#Python_3.6 def chinese_remainder(n, a): sum = 0 prod = reduce(lambda a, b: a", "3, 2] # chinese_remainder(n, a) == 23 # https://rosettacode.org/wiki/Chinese_remainder_theorem#Python_3.6 def chinese_remainder(n, a): sum", "1 while a > 1: q = a // b a, b =", "b x0, x1 = 0, 1 if b == 1: return 1 while", "p = prod // n_i sum += a_i * mul_inv(p, n_i) * p", "a, b = b, a % b x0, x1 = x1 - q", "= a // b a, b = b, a % b x0, x1", "23 # https://rosettacode.org/wiki/Chinese_remainder_theorem#Python_3.6 def chinese_remainder(n, a): sum = 0 prod = reduce(lambda a,", "a) == 23 # https://rosettacode.org/wiki/Chinese_remainder_theorem#Python_3.6 def chinese_remainder(n, a): sum = 0 prod =", "> 1: q = a // b a, b = b, a %", "b0 return x1 def numbers_from(l): regex = r\"-?\\d+\" return [int(match) for match in", "for n_i, a_i in zip(n, a): p = prod // n_i sum +=", "b: a * b, n) for n_i, a_i in zip(n, a): p =", "mul_inv(a, b): b0 = b x0, x1 = 0, 1 if b ==", "x1 = 0, 1 if b == 1: return 1 while a >", "% prod def mul_inv(a, b): b0 = b x0, x1 = 0, 1", "= b x0, x1 = 0, 1 if b == 1: return 1", "n_i, a_i in zip(n, a): p = prod // n_i sum += a_i", "x1 = x1 - q * x0, x0 if x1 < 0: x1", "== 23 # https://rosettacode.org/wiki/Chinese_remainder_theorem#Python_3.6 def chinese_remainder(n, a): sum = 0 prod = reduce(lambda", "a > 1: q = a // b a, b = b, a", "return 1 while a > 1: q = a // b a, b", "mul_inv(p, n_i) * p return sum % prod def mul_inv(a, b): b0 =", "x0, x1 = x1 - q * x0, x0 if x1 < 
0:", "sum = 0 prod = reduce(lambda a, b: a * b, n) for", "b == 1: return 1 while a > 1: q = a //", "reduce(lambda a, b: a * b, n) for n_i, a_i in zip(n, a):", "1: q = a // b a, b = b, a % b", "[2, 3, 2] # chinese_remainder(n, a) == 23 # https://rosettacode.org/wiki/Chinese_remainder_theorem#Python_3.6 def chinese_remainder(n, a):", "# chinese_remainder(n, a) == 23 # https://rosettacode.org/wiki/Chinese_remainder_theorem#Python_3.6 def chinese_remainder(n, a): sum = 0", "< 0: x1 += b0 return x1 def numbers_from(l): regex = r\"-?\\d+\" return", "b0 = b x0, x1 = 0, 1 if b == 1: return", "import reduce import re # Usage: # n = [3, 5, 7] #", "b): b0 = b x0, x1 = 0, 1 if b == 1:", "// b a, b = b, a % b x0, x1 = x1", "+= a_i * mul_inv(p, n_i) * p return sum % prod def mul_inv(a,", "0: x1 += b0 return x1 def numbers_from(l): regex = r\"-?\\d+\" return [int(match)", "sum += a_i * mul_inv(p, n_i) * p return sum % prod def", "a): sum = 0 prod = reduce(lambda a, b: a * b, n)", "1 if b == 1: return 1 while a > 1: q =", "7] # a = [2, 3, 2] # chinese_remainder(n, a) == 23 #", "import re # Usage: # n = [3, 5, 7] # a =", "* b, n) for n_i, a_i in zip(n, a): p = prod //", "Usage: # n = [3, 5, 7] # a = [2, 3, 2]", "x1 def numbers_from(l): regex = r\"-?\\d+\" return [int(match) for match in re.findall(regex, l)]", "# n = [3, 5, 7] # a = [2, 3, 2] #", "prod def mul_inv(a, b): b0 = b x0, x1 = 0, 1 if", "return sum % prod def mul_inv(a, b): b0 = b x0, x1 =", "a, b: a * b, n) for n_i, a_i in zip(n, a): p", "* mul_inv(p, n_i) * p return sum % prod def mul_inv(a, b): b0", "0, 1 if b == 1: return 1 while a > 1: q", "// n_i sum += a_i * mul_inv(p, n_i) * p return sum %", "zip(n, a): p = prod // n_i sum += a_i * mul_inv(p, n_i)", "* p return sum % prod def mul_inv(a, b): b0 = b x0,", "in zip(n, a): p = prod // n_i sum += a_i * mul_inv(p,", "q = a // b a, b = b, a % b x0,", "a = [2, 3, 2] # chinese_remainder(n, a) == 23 # https://rosettacode.org/wiki/Chinese_remainder_theorem#Python_3.6 def", 
"b, n) for n_i, a_i in zip(n, a): p = prod // n_i", "n = [3, 5, 7] # a = [2, 3, 2] # chinese_remainder(n,", "# https://rosettacode.org/wiki/Chinese_remainder_theorem#Python_3.6 def chinese_remainder(n, a): sum = 0 prod = reduce(lambda a, b:", "b, a % b x0, x1 = x1 - q * x0, x0", "x1 += b0 return x1 def numbers_from(l): regex = r\"-?\\d+\" return [int(match) for", "a * b, n) for n_i, a_i in zip(n, a): p = prod", "x1 < 0: x1 += b0 return x1 def numbers_from(l): regex = r\"-?\\d+\"", "prod // n_i sum += a_i * mul_inv(p, n_i) * p return sum", "from functools import reduce import re # Usage: # n = [3, 5,", "q * x0, x0 if x1 < 0: x1 += b0 return x1", "- q * x0, x0 if x1 < 0: x1 += b0 return", "[3, 5, 7] # a = [2, 3, 2] # chinese_remainder(n, a) ==", "% b x0, x1 = x1 - q * x0, x0 if x1", "return x1 def numbers_from(l): regex = r\"-?\\d+\" return [int(match) for match in re.findall(regex,", "= x1 - q * x0, x0 if x1 < 0: x1 +=", "= 0, 1 if b == 1: return 1 while a > 1:", "chinese_remainder(n, a) == 23 # https://rosettacode.org/wiki/Chinese_remainder_theorem#Python_3.6 def chinese_remainder(n, a): sum = 0 prod", "n_i) * p return sum % prod def mul_inv(a, b): b0 = b", "x1 - q * x0, x0 if x1 < 0: x1 += b0", "a // b a, b = b, a % b x0, x1 =", "a_i in zip(n, a): p = prod // n_i sum += a_i *", "b = b, a % b x0, x1 = x1 - q *", "chinese_remainder(n, a): sum = 0 prod = reduce(lambda a, b: a * b,", "reduce import re # Usage: # n = [3, 5, 7] # a", "a): p = prod // n_i sum += a_i * mul_inv(p, n_i) *", "0 prod = reduce(lambda a, b: a * b, n) for n_i, a_i", "while a > 1: q = a // b a, b = b,", "= b, a % b x0, x1 = x1 - q * x0,", "re # Usage: # n = [3, 5, 7] # a = [2,", "x0, x1 = 0, 1 if b == 1: return 1 while a", "x0 if x1 < 0: x1 += b0 return x1 def numbers_from(l): regex", "def chinese_remainder(n, a): sum = 0 prod = reduce(lambda a, b: a *", "sum % prod def mul_inv(a, b): b0 = b x0, x1 = 0,", "# a = [2, 3, 2] # chinese_remainder(n, a) == 23 # 
https://rosettacode.org/wiki/Chinese_remainder_theorem#Python_3.6", "= 0 prod = reduce(lambda a, b: a * b, n) for n_i,", "<reponame>josephroquedev/advent-of-code from functools import reduce import re # Usage: # n = [3,", "1: return 1 while a > 1: q = a // b a,", "+= b0 return x1 def numbers_from(l): regex = r\"-?\\d+\" return [int(match) for match", "b x0, x1 = x1 - q * x0, x0 if x1 <", "# Usage: # n = [3, 5, 7] # a = [2, 3,", "= prod // n_i sum += a_i * mul_inv(p, n_i) * p return", "5, 7] # a = [2, 3, 2] # chinese_remainder(n, a) == 23", "p return sum % prod def mul_inv(a, b): b0 = b x0, x1", "n_i sum += a_i * mul_inv(p, n_i) * p return sum % prod", "if b == 1: return 1 while a > 1: q = a", "if x1 < 0: x1 += b0 return x1 def numbers_from(l): regex =", "= [3, 5, 7] # a = [2, 3, 2] # chinese_remainder(n, a)", "2] # chinese_remainder(n, a) == 23 # https://rosettacode.org/wiki/Chinese_remainder_theorem#Python_3.6 def chinese_remainder(n, a): sum =", "x0, x0 if x1 < 0: x1 += b0 return x1 def numbers_from(l):", "= [2, 3, 2] # chinese_remainder(n, a) == 23 # https://rosettacode.org/wiki/Chinese_remainder_theorem#Python_3.6 def chinese_remainder(n,", "* x0, x0 if x1 < 0: x1 += b0 return x1 def" ]
[ "<filename>paths_cli/__init__.py<gh_stars>1-10 from .cli import OpenPathSamplingCLI from . import commands from . import version" ]
[ "import unicode_literals from django.db import models from django.conf import settings from MapApi import", "from django.conf import settings from MapApi import signals from django.dispatch import receiver from", "import settings from MapApi import signals from django.dispatch import receiver from rest_framework.authtoken.models import", "from MapApi import signals from django.dispatch import receiver from rest_framework.authtoken.models import Token @receiver(signals.user_login,", "django.dispatch import receiver from rest_framework.authtoken.models import Token @receiver(signals.user_login, sender=settings.AUTH_USER_MODEL) def create_auth_token(sender, instance=None, created=False,", "from django.db import models from django.conf import settings from MapApi import signals from", "settings from MapApi import signals from django.dispatch import receiver from rest_framework.authtoken.models import Token", "def create_auth_token(sender, instance=None, created=False, **kwargs): if created: Token.objects.create(user=instance) # Create your models here.", "__future__ import unicode_literals from django.db import models from django.conf import settings from MapApi", "from __future__ import unicode_literals from django.db import models from django.conf import settings from", "rest_framework.authtoken.models import Token @receiver(signals.user_login, sender=settings.AUTH_USER_MODEL) def create_auth_token(sender, instance=None, created=False, **kwargs): if created: Token.objects.create(user=instance)", "Token @receiver(signals.user_login, sender=settings.AUTH_USER_MODEL) def create_auth_token(sender, instance=None, created=False, **kwargs): if created: Token.objects.create(user=instance) # Create", "from django.dispatch import receiver from rest_framework.authtoken.models import Token @receiver(signals.user_login, sender=settings.AUTH_USER_MODEL) def create_auth_token(sender, instance=None,", "from rest_framework.authtoken.models import Token @receiver(signals.user_login, 
sender=settings.AUTH_USER_MODEL) def create_auth_token(sender, instance=None, created=False, **kwargs): if created:", "<reponame>todor943/mapEngine from __future__ import unicode_literals from django.db import models from django.conf import settings", "signals from django.dispatch import receiver from rest_framework.authtoken.models import Token @receiver(signals.user_login, sender=settings.AUTH_USER_MODEL) def create_auth_token(sender,", "import Token @receiver(signals.user_login, sender=settings.AUTH_USER_MODEL) def create_auth_token(sender, instance=None, created=False, **kwargs): if created: Token.objects.create(user=instance) #", "unicode_literals from django.db import models from django.conf import settings from MapApi import signals", "django.db import models from django.conf import settings from MapApi import signals from django.dispatch", "sender=settings.AUTH_USER_MODEL) def create_auth_token(sender, instance=None, created=False, **kwargs): if created: Token.objects.create(user=instance) # Create your models", "MapApi import signals from django.dispatch import receiver from rest_framework.authtoken.models import Token @receiver(signals.user_login, sender=settings.AUTH_USER_MODEL)", "@receiver(signals.user_login, sender=settings.AUTH_USER_MODEL) def create_auth_token(sender, instance=None, created=False, **kwargs): if created: Token.objects.create(user=instance) # Create your", "import signals from django.dispatch import receiver from rest_framework.authtoken.models import Token @receiver(signals.user_login, sender=settings.AUTH_USER_MODEL) def", "import receiver from rest_framework.authtoken.models import Token @receiver(signals.user_login, sender=settings.AUTH_USER_MODEL) def create_auth_token(sender, instance=None, created=False, **kwargs):", "import models from django.conf import settings from MapApi import signals from django.dispatch import", "django.conf import settings from MapApi import signals from django.dispatch import receiver from 
rest_framework.authtoken.models", "models from django.conf import settings from MapApi import signals from django.dispatch import receiver", "receiver from rest_framework.authtoken.models import Token @receiver(signals.user_login, sender=settings.AUTH_USER_MODEL) def create_auth_token(sender, instance=None, created=False, **kwargs): if" ]
[ "used in place of the card prompt. :param f: a TextIO Stream of", "for file in files: file_path = os.path.join(path, file) if os.path.isfile(file_path) and file.endswith('.json'): yield", "files = os.listdir(path) for file in files: file_path = os.path.join(path, file) if os.path.isfile(file_path)", "are then deleted, \"closed\". cards = [card for card in cards if not", "list was created. Sorts were considered to end when the # last card", "Iterator from dateutil.parser import isoparse from utils.sorts import Sort, Group def parse_board(f: TextIO,", "more useful for analysis. Card prompts will be mapped to the given ID", "created. Sorts were considered to end when the # last card move or", "analysis. Card prompts will be mapped to the given ID when parsing and", "mapped to the given ID when parsing and used in place of the", "= os.path.join(path, file) if os.path.isfile(file_path) and file.endswith('.json'): yield file_path def parse_sorts_in_dir(path: str, card_mapping:", "in action_data: valid_actions.append(action) # List is created elif action_type == 'createList': valid_actions.append(action) #", "json file. A card_mapping maps the card prompts to an ID which is", "end when the # last card move or list rename action was performed.", "cards = [card for card in cards if not card['closed']] for card in", "not None: card_data = card_mapping[card['name']] else: card_data = card['name'] group.cards.add(card_data) actions = data['actions']", "files: file_path = os.path.join(path, file) if os.path.isfile(file_path) and file.endswith('.json'): yield file_path def parse_sorts_in_dir(path:", ":param card_mapping: a mapping of card names to card ids :return: a Sort", "typing import TextIO, Hashable, Iterator from dateutil.parser import isoparse from utils.sorts import Sort,", "a path to a directory :return: the list of paths to json files", "add cards which are then deleted, \"closed\". cards = [card for card in", "which is usually more useful for analysis. 
Card prompts will be mapped to", "and list renaming are considered. valid_actions = [] for action in actions: action_data", "place of the card prompt. :param f: a TextIO Stream of the trello", "to json files in the given directory. Nested directories are not traversed. :param", "= isoparse(actions[-1]['date']) total_sort_time = end_time - start_time # Empty groups are discarded. groups", "will be mapped to the given ID when parsing and used in place", "discarded. groups = [group for group in groups_by_id.values() if group.cards] sort_name = data['name']", "= Sort(sort_name, groups, cards, total_sort_time) return sort def get_paths_to_jsons_in_dir(path: str) -> Iterator[str]: \"\"\"", "in cards if not card['closed']] for card in cards: group_id = card['idList'] group", ":return: the list of paths to json files in the given directory \"\"\"", "os.listdir(path) for file in files: file_path = os.path.join(path, file) if os.path.isfile(file_path) and file.endswith('.json'):", "\"\"\" Returns a list of paths to json files in the given directory.", "not card['closed']] for card in cards: group_id = card['idList'] group = groups_by_id[group_id] #", "not traversed. :param path: a path to a directory :return: the list of", "actions.sort(key=lambda x: isoparse(x['date'])) # Only card moves, list creation, and list renaming are", "first trello list was created. 
Sorts were considered to end when the #", "file.endswith('.json'): yield file_path def parse_sorts_in_dir(path: str, card_mapping: dict[str, Hashable] = None) -> list[Sort]:", "if action['type'] == 'createList') start_time = isoparse(first_list['date']) end_time = isoparse(actions[-1]['date']) total_sort_time = end_time", "trello board json file :param card_mapping: a mapping of card names to card", "if group.cards] sort_name = data['name'] cards = set(card_mapping.values()) sort = Sort(sort_name, groups, cards,", "cards = set(card_mapping.values()) sort = Sort(sort_name, groups, cards, total_sort_time) return sort def get_paths_to_jsons_in_dir(path:", "= isoparse(first_list['date']) end_time = isoparse(actions[-1]['date']) total_sort_time = end_time - start_time # Empty groups", "str, card_mapping: dict[str, Hashable] = None) -> list[Sort]: \"\"\" Parses all sorts in", "= group cards = data['cards'] # Participants may accidentally add cards which are", "path in trello_json_paths: with open(path, 'r') as f: sort = parse_board(f, card_mapping) sorts.append(sort)", "study, sorts were considered to start when the # first trello list was", "card ids :return: a list of Sort objects \"\"\" sorts = [] trello_json_paths", "sort_name = data['name'] cards = set(card_mapping.values()) sort = Sort(sort_name, groups, cards, total_sort_time) return", "to calling parse_sort on each json file in the given directory. :param path:", "group.cards.add(card_data) actions = data['actions'] actions.sort(key=lambda x: isoparse(x['date'])) # Only card moves, list creation,", "and used in place of the card prompt. :param f: a TextIO Stream", "their lists by list ID. 
So, a temporary mapping # from list IDs", "return sort def get_paths_to_jsons_in_dir(path: str) -> Iterator[str]: \"\"\" Returns a list of paths", "cards if not card['closed']] for card in cards: group_id = card['idList'] group =", "= [card for card in cards if not card['closed']] for card in cards:", "path: a path to a directory :return: the list of paths to json", "-> Sort: \"\"\" Extracts the information from a trello board json file. A", "[group for group in groups_by_id.values() if group.cards] sort_name = data['name'] cards = set(card_mapping.values())", "directory. Nested directories are not traversed. This is equivalent to calling parse_sort on", "trello_lists: group_name = trello_list['name'] list_id = trello_list['id'] group = Group(group_name) groups_by_id[list_id] = group", "accidentally add cards which are then deleted, \"closed\". cards = [card for card", "were considered to start when the # first trello list was created. Sorts", "= [] trello_json_paths = get_paths_to_jsons_in_dir(path) for path in trello_json_paths: with open(path, 'r') as", "= os.listdir(path) for file in files: file_path = os.path.join(path, file) if os.path.isfile(file_path) and", "= [group for group in groups_by_id.values() if group.cards] sort_name = data['name'] cards =", "file) if os.path.isfile(file_path) and file.endswith('.json'): yield file_path def parse_sorts_in_dir(path: str, card_mapping: dict[str, Hashable]", "directory :param card_mapping: an optional mapping of card names to card ids :return:", "group.cards] sort_name = data['name'] cards = set(card_mapping.values()) sort = Sort(sort_name, groups, cards, total_sort_time)", "= data['lists'] trello_lists.sort(key=lambda x: x['pos']) # Cards are linked to their lists by", "more useful to map card prompts to an ID for analysis if card_mapping", "and file.endswith('.json'): yield file_path def parse_sorts_in_dir(path: str, card_mapping: dict[str, Hashable] = None) ->", "trello_list in trello_lists: group_name = 
trello_list['name'] list_id = trello_list['id'] group = Group(group_name) groups_by_id[list_id]", "dict[str, Hashable]) -> Sort: \"\"\" Extracts the information from a trello board json", "Card prompts will be mapped to the given ID when parsing and used", "actions: action_data = action['data'] action_type = action['type'] # Card is moved if action_type", "TextIO, Hashable, Iterator from dateutil.parser import isoparse from utils.sorts import Sort, Group def", "is renamed elif action_type == 'updateList' and 'name' in action_data['old']: valid_actions.append(action) # For", "-> Iterator[str]: \"\"\" Returns a list of paths to json files in the", "Parses all sorts in the given directory. Nested directories are not traversed. This", "valid_actions.append(action) # List is renamed elif action_type == 'updateList' and 'name' in action_data['old']:", "[card for card in cards if not card['closed']] for card in cards: group_id", "= get_paths_to_jsons_in_dir(path) for path in trello_json_paths: with open(path, 'r') as f: sort =", "file_path = os.path.join(path, file) if os.path.isfile(file_path) and file.endswith('.json'): yield file_path def parse_sorts_in_dir(path: str,", "move or list rename action was performed. first_list = next(action for action in", "action_type == 'updateList' and 'name' in action_data['old']: valid_actions.append(action) # For the purposes of", "Nested directories are not traversed. :param path: a path to a directory :return:", "card ids :return: a Sort object \"\"\" data = json.load(f) trello_lists = data['lists']", "end_time - start_time # Empty groups are discarded. groups = [group for group", "action_type = action['type'] # Card is moved if action_type == 'updateCard' and 'listBefore'", "directory \"\"\" files = os.listdir(path) for file in files: file_path = os.path.join(path, file)", "list renaming are considered. valid_actions = [] for action in actions: action_data =", "list[Sort]: \"\"\" Parses all sorts in the given directory. 
Nested directories are not", "For the purposes of this study, sorts were considered to start when the", "is needed. groups_by_id = {} for trello_list in trello_lists: group_name = trello_list['name'] list_id", "= card_mapping[card['name']] else: card_data = card['name'] group.cards.add(card_data) actions = data['actions'] actions.sort(key=lambda x: isoparse(x['date']))", "directory. Nested directories are not traversed. :param path: a path to a directory", "sort = Sort(sort_name, groups, cards, total_sort_time) return sort def get_paths_to_jsons_in_dir(path: str) -> Iterator[str]:", "an ID which is usually more useful for analysis. Card prompts will be", "a mapping of card names to card ids :return: a Sort object \"\"\"", "list_id = trello_list['id'] group = Group(group_name) groups_by_id[list_id] = group cards = data['cards'] #", "given directory. Nested directories are not traversed. :param path: a path to a", "action_data = action['data'] action_type = action['type'] # Card is moved if action_type ==", "in trello_lists: group_name = trello_list['name'] list_id = trello_list['id'] group = Group(group_name) groups_by_id[list_id] =", "'name' in action_data['old']: valid_actions.append(action) # For the purposes of this study, sorts were", "when parsing and used in place of the card prompt. :param f: a", "ids :return: a Sort object \"\"\" data = json.load(f) trello_lists = data['lists'] trello_lists.sort(key=lambda", "in groups_by_id.values() if group.cards] sort_name = data['name'] cards = set(card_mapping.values()) sort = Sort(sort_name,", "action in valid_actions if action['type'] == 'createList') start_time = isoparse(first_list['date']) end_time = isoparse(actions[-1]['date'])", "created elif action_type == 'createList': valid_actions.append(action) # List is renamed elif action_type ==", "start when the # first trello list was created. Sorts were considered to", "the card prompt. 
:param f: a TextIO Stream of the trello board json", "paths to json files in the given directory. Nested directories are not traversed.", "card prompts to an ID which is usually more useful for analysis. Card", "== 'createList') start_time = isoparse(first_list['date']) end_time = isoparse(actions[-1]['date']) total_sort_time = end_time - start_time", "data = json.load(f) trello_lists = data['lists'] trello_lists.sort(key=lambda x: x['pos']) # Cards are linked", "trello_list['name'] list_id = trello_list['id'] group = Group(group_name) groups_by_id[list_id] = group cards = data['cards']", ":param f: a TextIO Stream of the trello board json file :param card_mapping:", "a path to a directory :param card_mapping: an optional mapping of card names", "[] for action in actions: action_data = action['data'] action_type = action['type'] # Card", "= action['type'] # Card is moved if action_type == 'updateCard' and 'listBefore' in", "groups = [group for group in groups_by_id.values() if group.cards] sort_name = data['name'] cards", "traversed. :param path: a path to a directory :return: the list of paths", "the given ID when parsing and used in place of the card prompt.", "- start_time # Empty groups are discarded. groups = [group for group in", "directories are not traversed. This is equivalent to calling parse_sort on each json", "cards = data['cards'] # Participants may accidentally add cards which are then deleted,", "valid_actions = [] for action in actions: action_data = action['data'] action_type = action['type']", "given directory. Nested directories are not traversed. 
This is equivalent to calling parse_sort", "in files: file_path = os.path.join(path, file) if os.path.isfile(file_path) and file.endswith('.json'): yield file_path def", "yield file_path def parse_sorts_in_dir(path: str, card_mapping: dict[str, Hashable] = None) -> list[Sort]: \"\"\"", "def parse_board(f: TextIO, card_mapping: dict[str, Hashable]) -> Sort: \"\"\" Extracts the information from", "sorts in the given directory. Nested directories are not traversed. This is equivalent", "in the given directory. Nested directories are not traversed. :param path: a path", "# List is renamed elif action_type == 'updateList' and 'name' in action_data['old']: valid_actions.append(action)", "to json files in the given directory \"\"\" files = os.listdir(path) for file", "names to card ids :return: a list of Sort objects \"\"\" sorts =", "optional mapping of card names to card ids :return: a list of Sort", "if not card['closed']] for card in cards: group_id = card['idList'] group = groups_by_id[group_id]", "None) -> list[Sort]: \"\"\" Parses all sorts in the given directory. Nested directories", "= {} for trello_list in trello_lists: group_name = trello_list['name'] list_id = trello_list['id'] group", "prompts to an ID for analysis if card_mapping is not None: card_data =", "for action in valid_actions if action['type'] == 'createList') start_time = isoparse(first_list['date']) end_time =", "Hashable, Iterator from dateutil.parser import isoparse from utils.sorts import Sort, Group def parse_board(f:", "So, a temporary mapping # from list IDs to groups is needed. 
groups_by_id", "TextIO, card_mapping: dict[str, Hashable]) -> Sort: \"\"\" Extracts the information from a trello", "card_data = card['name'] group.cards.add(card_data) actions = data['actions'] actions.sort(key=lambda x: isoparse(x['date'])) # Only card", "is created elif action_type == 'createList': valid_actions.append(action) # List is renamed elif action_type", "calling parse_sort on each json file in the given directory. :param path: a", "trello_lists.sort(key=lambda x: x['pos']) # Cards are linked to their lists by list ID.", "to an ID for analysis if card_mapping is not None: card_data = card_mapping[card['name']]", "Sort object \"\"\" data = json.load(f) trello_lists = data['lists'] trello_lists.sort(key=lambda x: x['pos']) #", "action_type == 'createList': valid_actions.append(action) # List is renamed elif action_type == 'updateList' and", "card moves, list creation, and list renaming are considered. valid_actions = [] for", "is usually more useful for analysis. Card prompts will be mapped to the", "to card ids :return: a list of Sort objects \"\"\" sorts = []", "ID. So, a temporary mapping # from list IDs to groups is needed.", "card_mapping: dict[str, Hashable]) -> Sort: \"\"\" Extracts the information from a trello board", "was performed. first_list = next(action for action in valid_actions if action['type'] == 'createList')", "to a directory :return: the list of paths to json files in the", "prompts to an ID which is usually more useful for analysis. Card prompts", "on each json file in the given directory. :param path: a path to", "f: a TextIO Stream of the trello board json file :param card_mapping: a", "useful to map card prompts to an ID for analysis if card_mapping is", "objects \"\"\" sorts = [] trello_json_paths = get_paths_to_jsons_in_dir(path) for path in trello_json_paths: with", "equivalent to calling parse_sort on each json file in the given directory. 
:param", "the card prompts to an ID which is usually more useful for analysis.", "action_type == 'updateCard' and 'listBefore' in action_data: valid_actions.append(action) # List is created elif", "# Empty groups are discarded. groups = [group for group in groups_by_id.values() if", "are linked to their lists by list ID. So, a temporary mapping #", "Group def parse_board(f: TextIO, card_mapping: dict[str, Hashable]) -> Sort: \"\"\" Extracts the information", "import Sort, Group def parse_board(f: TextIO, card_mapping: dict[str, Hashable]) -> Sort: \"\"\" Extracts", "None: card_data = card_mapping[card['name']] else: card_data = card['name'] group.cards.add(card_data) actions = data['actions'] actions.sort(key=lambda", "each json file in the given directory. :param path: a path to a", "groups is needed. groups_by_id = {} for trello_list in trello_lists: group_name = trello_list['name']", "groups_by_id[list_id] = group cards = data['cards'] # Participants may accidentally add cards which", "is equivalent to calling parse_sort on each json file in the given directory.", "the # last card move or list rename action was performed. first_list =", "Iterator[str]: \"\"\" Returns a list of paths to json files in the given", "file :param card_mapping: a mapping of card names to card ids :return: a", "# It may be more useful to map card prompts to an ID", "json file in the given directory. :param path: a path to a directory", "the information from a trello board json file. A card_mapping maps the card", "to an ID which is usually more useful for analysis. Card prompts will", ":return: a Sort object \"\"\" data = json.load(f) trello_lists = data['lists'] trello_lists.sort(key=lambda x:", ":param path: a path to a directory :param card_mapping: an optional mapping of", "of the card prompt. :param f: a TextIO Stream of the trello board", "may accidentally add cards which are then deleted, \"closed\". 
cards = [card for", ":return: a list of Sort objects \"\"\" sorts = [] trello_json_paths = get_paths_to_jsons_in_dir(path)", "# Card is moved if action_type == 'updateCard' and 'listBefore' in action_data: valid_actions.append(action)", "= action['data'] action_type = action['type'] # Card is moved if action_type == 'updateCard'", "card prompt. :param f: a TextIO Stream of the trello board json file", "dict[str, Hashable] = None) -> list[Sort]: \"\"\" Parses all sorts in the given", "\"\"\" data = json.load(f) trello_lists = data['lists'] trello_lists.sort(key=lambda x: x['pos']) # Cards are", "# Only card moves, list creation, and list renaming are considered. valid_actions =", "os.path.join(path, file) if os.path.isfile(file_path) and file.endswith('.json'): yield file_path def parse_sorts_in_dir(path: str, card_mapping: dict[str,", "Sort: \"\"\" Extracts the information from a trello board json file. A card_mapping", "not traversed. This is equivalent to calling parse_sort on each json file in", "object \"\"\" data = json.load(f) trello_lists = data['lists'] trello_lists.sort(key=lambda x: x['pos']) # Cards", "mapping of card names to card ids :return: a Sort object \"\"\" data", "of paths to json files in the given directory \"\"\" files = os.listdir(path)", "first_list = next(action for action in valid_actions if action['type'] == 'createList') start_time =", "total_sort_time = end_time - start_time # Empty groups are discarded. groups = [group", "Cards are linked to their lists by list ID. So, a temporary mapping", "group_id = card['idList'] group = groups_by_id[group_id] # It may be more useful to", "prompt. :param f: a TextIO Stream of the trello board json file :param", "This is equivalent to calling parse_sort on each json file in the given", "from a trello board json file. 
A card_mapping maps the card prompts to", "It may be more useful to map card prompts to an ID for", "import isoparse from utils.sorts import Sort, Group def parse_board(f: TextIO, card_mapping: dict[str, Hashable])", "path to a directory :return: the list of paths to json files in", "in actions: action_data = action['data'] action_type = action['type'] # Card is moved if", "List is renamed elif action_type == 'updateList' and 'name' in action_data['old']: valid_actions.append(action) #", "card prompts to an ID for analysis if card_mapping is not None: card_data", "'updateCard' and 'listBefore' in action_data: valid_actions.append(action) # List is created elif action_type ==", "= groups_by_id[group_id] # It may be more useful to map card prompts to", "group = Group(group_name) groups_by_id[list_id] = group cards = data['cards'] # Participants may accidentally", "trello_lists = data['lists'] trello_lists.sort(key=lambda x: x['pos']) # Cards are linked to their lists", "json.load(f) trello_lists = data['lists'] trello_lists.sort(key=lambda x: x['pos']) # Cards are linked to their", "given directory. :param path: a path to a directory :param card_mapping: an optional", "else: card_data = card['name'] group.cards.add(card_data) actions = data['actions'] actions.sort(key=lambda x: isoparse(x['date'])) # Only", "by list ID. So, a temporary mapping # from list IDs to groups", "paths to json files in the given directory \"\"\" files = os.listdir(path) for", "list of paths to json files in the given directory. 
Nested directories are", "to card ids :return: a Sort object \"\"\" data = json.load(f) trello_lists =", "= data['cards'] # Participants may accidentally add cards which are then deleted, \"closed\".", "Card is moved if action_type == 'updateCard' and 'listBefore' in action_data: valid_actions.append(action) #", "were considered to end when the # last card move or list rename", "'createList': valid_actions.append(action) # List is renamed elif action_type == 'updateList' and 'name' in", "card names to card ids :return: a Sort object \"\"\" data = json.load(f)", "action['data'] action_type = action['type'] # Card is moved if action_type == 'updateCard' and", "an optional mapping of card names to card ids :return: a list of", "for action in actions: action_data = action['data'] action_type = action['type'] # Card is", "ID when parsing and used in place of the card prompt. :param f:", "be mapped to the given ID when parsing and used in place of", "isoparse(first_list['date']) end_time = isoparse(actions[-1]['date']) total_sort_time = end_time - start_time # Empty groups are", "usually more useful for analysis. Card prompts will be mapped to the given", "then deleted, \"closed\". cards = [card for card in cards if not card['closed']]", "mapping of card names to card ids :return: a list of Sort objects", "= set(card_mapping.values()) sort = Sort(sort_name, groups, cards, total_sort_time) return sort def get_paths_to_jsons_in_dir(path: str)", "isoparse(actions[-1]['date']) total_sort_time = end_time - start_time # Empty groups are discarded. groups =", "parse_sort on each json file in the given directory. :param path: a path", "# Participants may accidentally add cards which are then deleted, \"closed\". 
cards =", "= data['name'] cards = set(card_mapping.values()) sort = Sort(sort_name, groups, cards, total_sort_time) return sort", "card['idList'] group = groups_by_id[group_id] # It may be more useful to map card", "sorts = [] trello_json_paths = get_paths_to_jsons_in_dir(path) for path in trello_json_paths: with open(path, 'r')", "# first trello list was created. Sorts were considered to end when the", "Only card moves, list creation, and list renaming are considered. valid_actions = []", "of card names to card ids :return: a Sort object \"\"\" data =", "map card prompts to an ID for analysis if card_mapping is not None:", "is moved if action_type == 'updateCard' and 'listBefore' in action_data: valid_actions.append(action) # List", "files in the given directory. Nested directories are not traversed. :param path: a", "rename action was performed. first_list = next(action for action in valid_actions if action['type']", "= json.load(f) trello_lists = data['lists'] trello_lists.sort(key=lambda x: x['pos']) # Cards are linked to", "renaming are considered. valid_actions = [] for action in actions: action_data = action['data']", "file in files: file_path = os.path.join(path, file) if os.path.isfile(file_path) and file.endswith('.json'): yield file_path", "import TextIO, Hashable, Iterator from dateutil.parser import isoparse from utils.sorts import Sort, Group", "file_path def parse_sorts_in_dir(path: str, card_mapping: dict[str, Hashable] = None) -> list[Sort]: \"\"\" Parses", "list creation, and list renaming are considered. valid_actions = [] for action in", "get_paths_to_jsons_in_dir(path) for path in trello_json_paths: with open(path, 'r') as f: sort = parse_board(f,", "are not traversed. 
This is equivalent to calling parse_sort on each json file", "card_mapping is not None: card_data = card_mapping[card['name']] else: card_data = card['name'] group.cards.add(card_data) actions", "A card_mapping maps the card prompts to an ID which is usually more", "Empty groups are discarded. groups = [group for group in groups_by_id.values() if group.cards]", "to the given ID when parsing and used in place of the card", "list ID. So, a temporary mapping # from list IDs to groups is", "considered to start when the # first trello list was created. Sorts were", "when the # first trello list was created. Sorts were considered to end", "Sorts were considered to end when the # last card move or list", "board json file. A card_mapping maps the card prompts to an ID which", "== 'createList': valid_actions.append(action) # List is renamed elif action_type == 'updateList' and 'name'", "elif action_type == 'createList': valid_actions.append(action) # List is renamed elif action_type == 'updateList'", "a directory :param card_mapping: an optional mapping of card names to card ids", "isoparse(x['date'])) # Only card moves, list creation, and list renaming are considered. valid_actions", "parse_board(f: TextIO, card_mapping: dict[str, Hashable]) -> Sort: \"\"\" Extracts the information from a", "end_time = isoparse(actions[-1]['date']) total_sort_time = end_time - start_time # Empty groups are discarded.", "are not traversed. :param path: a path to a directory :return: the list", "TextIO Stream of the trello board json file :param card_mapping: a mapping of", "sort def get_paths_to_jsons_in_dir(path: str) -> Iterator[str]: \"\"\" Returns a list of paths to", "for card in cards: group_id = card['idList'] group = groups_by_id[group_id] # It may", "Sort objects \"\"\" sorts = [] trello_json_paths = get_paths_to_jsons_in_dir(path) for path in trello_json_paths:", "this study, sorts were considered to start when the # first trello list", "deleted, \"closed\". 
cards = [card for card in cards if not card['closed']] for", "= next(action for action in valid_actions if action['type'] == 'createList') start_time = isoparse(first_list['date'])", "Sort, Group def parse_board(f: TextIO, card_mapping: dict[str, Hashable]) -> Sort: \"\"\" Extracts the", "a directory :return: the list of paths to json files in the given", "to groups is needed. groups_by_id = {} for trello_list in trello_lists: group_name =", "-> list[Sort]: \"\"\" Parses all sorts in the given directory. Nested directories are", "list of Sort objects \"\"\" sorts = [] trello_json_paths = get_paths_to_jsons_in_dir(path) for path", "or list rename action was performed. first_list = next(action for action in valid_actions", "to start when the # first trello list was created. Sorts were considered", "card in cards if not card['closed']] for card in cards: group_id = card['idList']", "Hashable]) -> Sort: \"\"\" Extracts the information from a trello board json file.", "groups_by_id.values() if group.cards] sort_name = data['name'] cards = set(card_mapping.values()) sort = Sort(sort_name, groups,", "os from typing import TextIO, Hashable, Iterator from dateutil.parser import isoparse from utils.sorts", "action_data: valid_actions.append(action) # List is created elif action_type == 'createList': valid_actions.append(action) # List", "ids :return: a list of Sort objects \"\"\" sorts = [] trello_json_paths =", "groups_by_id[group_id] # It may be more useful to map card prompts to an", "trello_json_paths = get_paths_to_jsons_in_dir(path) for path in trello_json_paths: with open(path, 'r') as f: sort", "trello board json file. A card_mapping maps the card prompts to an ID", "was created. 
Sorts were considered to end when the # last card move", "def parse_sorts_in_dir(path: str, card_mapping: dict[str, Hashable] = None) -> list[Sort]: \"\"\" Parses all", "is not None: card_data = card_mapping[card['name']] else: card_data = card['name'] group.cards.add(card_data) actions =", "import os from typing import TextIO, Hashable, Iterator from dateutil.parser import isoparse from", "traversed. This is equivalent to calling parse_sort on each json file in the", "are considered. valid_actions = [] for action in actions: action_data = action['data'] action_type", "if action_type == 'updateCard' and 'listBefore' in action_data: valid_actions.append(action) # List is created", "= [] for action in actions: action_data = action['data'] action_type = action['type'] #", "parse_sorts_in_dir(path: str, card_mapping: dict[str, Hashable] = None) -> list[Sort]: \"\"\" Parses all sorts", "an ID for analysis if card_mapping is not None: card_data = card_mapping[card['name']] else:", "= card['name'] group.cards.add(card_data) actions = data['actions'] actions.sort(key=lambda x: isoparse(x['date'])) # Only card moves,", "group cards = data['cards'] # Participants may accidentally add cards which are then", "= data['actions'] actions.sort(key=lambda x: isoparse(x['date'])) # Only card moves, list creation, and list", "action_data['old']: valid_actions.append(action) # For the purposes of this study, sorts were considered to", "Sort(sort_name, groups, cards, total_sort_time) return sort def get_paths_to_jsons_in_dir(path: str) -> Iterator[str]: \"\"\" Returns", "# Cards are linked to their lists by list ID. 
So, a temporary", "card_mapping[card['name']] else: card_data = card['name'] group.cards.add(card_data) actions = data['actions'] actions.sort(key=lambda x: isoparse(x['date'])) #", "names to card ids :return: a Sort object \"\"\" data = json.load(f) trello_lists", "import json import os from typing import TextIO, Hashable, Iterator from dateutil.parser import", "directory :return: the list of paths to json files in the given directory", "group_name = trello_list['name'] list_id = trello_list['id'] group = Group(group_name) groups_by_id[list_id] = group cards", "of card names to card ids :return: a list of Sort objects \"\"\"", "for card in cards if not card['closed']] for card in cards: group_id =", "are discarded. groups = [group for group in groups_by_id.values() if group.cards] sort_name =", "to end when the # last card move or list rename action was", "last card move or list rename action was performed. first_list = next(action for", "card['name'] group.cards.add(card_data) actions = data['actions'] actions.sort(key=lambda x: isoparse(x['date'])) # Only card moves, list", "from utils.sorts import Sort, Group def parse_board(f: TextIO, card_mapping: dict[str, Hashable]) -> Sort:", "valid_actions if action['type'] == 'createList') start_time = isoparse(first_list['date']) end_time = isoparse(actions[-1]['date']) total_sort_time =", "= trello_list['name'] list_id = trello_list['id'] group = Group(group_name) groups_by_id[list_id] = group cards =", "needed. groups_by_id = {} for trello_list in trello_lists: group_name = trello_list['name'] list_id =", "action in actions: action_data = action['data'] action_type = action['type'] # Card is moved", "# from list IDs to groups is needed. 
groups_by_id = {} for trello_list", "def get_paths_to_jsons_in_dir(path: str) -> Iterator[str]: \"\"\" Returns a list of paths to json", "list of paths to json files in the given directory \"\"\" files =", "os.path.isfile(file_path) and file.endswith('.json'): yield file_path def parse_sorts_in_dir(path: str, card_mapping: dict[str, Hashable] = None)", "moves, list creation, and list renaming are considered. valid_actions = [] for action", "path to a directory :param card_mapping: an optional mapping of card names to", "card_mapping: an optional mapping of card names to card ids :return: a list", "from dateutil.parser import isoparse from utils.sorts import Sort, Group def parse_board(f: TextIO, card_mapping:", "action was performed. first_list = next(action for action in valid_actions if action['type'] ==", "a trello board json file. A card_mapping maps the card prompts to an", "ID for analysis if card_mapping is not None: card_data = card_mapping[card['name']] else: card_data", "for trello_list in trello_lists: group_name = trello_list['name'] list_id = trello_list['id'] group = Group(group_name)", "elif action_type == 'updateList' and 'name' in action_data['old']: valid_actions.append(action) # For the purposes", "data['name'] cards = set(card_mapping.values()) sort = Sort(sort_name, groups, cards, total_sort_time) return sort def", "trello_json_paths: with open(path, 'r') as f: sort = parse_board(f, card_mapping) sorts.append(sort) return sorts", "# last card move or list rename action was performed. first_list = next(action", "{} for trello_list in trello_lists: group_name = trello_list['name'] list_id = trello_list['id'] group =", ":param card_mapping: an optional mapping of card names to card ids :return: a", "data['cards'] # Participants may accidentally add cards which are then deleted, \"closed\". cards", "x: isoparse(x['date'])) # Only card moves, list creation, and list renaming are considered.", "in the given directory. 
:param path: a path to a directory :param card_mapping:", "'createList') start_time = isoparse(first_list['date']) end_time = isoparse(actions[-1]['date']) total_sort_time = end_time - start_time #", "start_time = isoparse(first_list['date']) end_time = isoparse(actions[-1]['date']) total_sort_time = end_time - start_time # Empty", "creation, and list renaming are considered. valid_actions = [] for action in actions:", "groups, cards, total_sort_time) return sort def get_paths_to_jsons_in_dir(path: str) -> Iterator[str]: \"\"\" Returns a", "get_paths_to_jsons_in_dir(path: str) -> Iterator[str]: \"\"\" Returns a list of paths to json files", "== 'updateList' and 'name' in action_data['old']: valid_actions.append(action) # For the purposes of this", "# For the purposes of this study, sorts were considered to start when", "which are then deleted, \"closed\". cards = [card for card in cards if", "for path in trello_json_paths: with open(path, 'r') as f: sort = parse_board(f, card_mapping)", "\"closed\". cards = [card for card in cards if not card['closed']] for card", "useful for analysis. Card prompts will be mapped to the given ID when", "action['type'] # Card is moved if action_type == 'updateCard' and 'listBefore' in action_data:", "Participants may accidentally add cards which are then deleted, \"closed\". cards = [card", "in the given directory \"\"\" files = os.listdir(path) for file in files: file_path", "directories are not traversed. :param path: a path to a directory :return: the", "data['lists'] trello_lists.sort(key=lambda x: x['pos']) # Cards are linked to their lists by list", "the purposes of this study, sorts were considered to start when the #", "for analysis. Card prompts will be mapped to the given ID when parsing", "isoparse from utils.sorts import Sort, Group def parse_board(f: TextIO, card_mapping: dict[str, Hashable]) ->", "\"\"\" Parses all sorts in the given directory. 
Nested directories are not traversed.", "in trello_json_paths: with open(path, 'r') as f: sort = parse_board(f, card_mapping) sorts.append(sort) return", "= None) -> list[Sort]: \"\"\" Parses all sorts in the given directory. Nested", "dateutil.parser import isoparse from utils.sorts import Sort, Group def parse_board(f: TextIO, card_mapping: dict[str,", "[] trello_json_paths = get_paths_to_jsons_in_dir(path) for path in trello_json_paths: with open(path, 'r') as f:", "\"\"\" files = os.listdir(path) for file in files: file_path = os.path.join(path, file) if", "\"\"\" Extracts the information from a trello board json file. A card_mapping maps", "# List is created elif action_type == 'createList': valid_actions.append(action) # List is renamed", "json import os from typing import TextIO, Hashable, Iterator from dateutil.parser import isoparse", "the # first trello list was created. Sorts were considered to end when", "files in the given directory \"\"\" files = os.listdir(path) for file in files:", "Stream of the trello board json file :param card_mapping: a mapping of card", "= Group(group_name) groups_by_id[list_id] = group cards = data['cards'] # Participants may accidentally add", "all sorts in the given directory. Nested directories are not traversed. This is", "x['pos']) # Cards are linked to their lists by list ID. So, a", "card names to card ids :return: a list of Sort objects \"\"\" sorts", "ID which is usually more useful for analysis. Card prompts will be mapped", "Hashable] = None) -> list[Sort]: \"\"\" Parses all sorts in the given directory.", "for analysis if card_mapping is not None: card_data = card_mapping[card['name']] else: card_data =", "card_mapping maps the card prompts to an ID which is usually more useful", "list IDs to groups is needed. 
groups_by_id = {} for trello_list in trello_lists:", "json files in the given directory \"\"\" files = os.listdir(path) for file in", "purposes of this study, sorts were considered to start when the # first", "start_time # Empty groups are discarded. groups = [group for group in groups_by_id.values()", "to map card prompts to an ID for analysis if card_mapping is not", "and 'name' in action_data['old']: valid_actions.append(action) # For the purposes of this study, sorts", "the given directory \"\"\" files = os.listdir(path) for file in files: file_path =", "Returns a list of paths to json files in the given directory. Nested", "in action_data['old']: valid_actions.append(action) # For the purposes of this study, sorts were considered", "in cards: group_id = card['idList'] group = groups_by_id[group_id] # It may be more", "file. A card_mapping maps the card prompts to an ID which is usually", "board json file :param card_mapping: a mapping of card names to card ids", "prompts will be mapped to the given ID when parsing and used in", "moved if action_type == 'updateCard' and 'listBefore' in action_data: valid_actions.append(action) # List is", "if card_mapping is not None: card_data = card_mapping[card['name']] else: card_data = card['name'] group.cards.add(card_data)", "cards: group_id = card['idList'] group = groups_by_id[group_id] # It may be more useful", "= trello_list['id'] group = Group(group_name) groups_by_id[list_id] = group cards = data['cards'] # Participants", "parsing and used in place of the card prompt. :param f: a TextIO", "card_mapping: dict[str, Hashable] = None) -> list[Sort]: \"\"\" Parses all sorts in the", "path: a path to a directory :param card_mapping: an optional mapping of card", "trello list was created. 
Sorts were considered to end when the # last", "card_mapping: a mapping of card names to card ids :return: a Sort object", "be more useful to map card prompts to an ID for analysis if", "the trello board json file :param card_mapping: a mapping of card names to", "if os.path.isfile(file_path) and file.endswith('.json'): yield file_path def parse_sorts_in_dir(path: str, card_mapping: dict[str, Hashable] =", "valid_actions.append(action) # For the purposes of this study, sorts were considered to start", "Group(group_name) groups_by_id[list_id] = group cards = data['cards'] # Participants may accidentally add cards", "temporary mapping # from list IDs to groups is needed. groups_by_id = {}", "set(card_mapping.values()) sort = Sort(sort_name, groups, cards, total_sort_time) return sort def get_paths_to_jsons_in_dir(path: str) ->", "Extracts the information from a trello board json file. A card_mapping maps the", "IDs to groups is needed. groups_by_id = {} for trello_list in trello_lists: group_name", "mapping # from list IDs to groups is needed. groups_by_id = {} for", "a TextIO Stream of the trello board json file :param card_mapping: a mapping", "list rename action was performed. first_list = next(action for action in valid_actions if", "Nested directories are not traversed. This is equivalent to calling parse_sort on each", "total_sort_time) return sort def get_paths_to_jsons_in_dir(path: str) -> Iterator[str]: \"\"\" Returns a list of", "analysis if card_mapping is not None: card_data = card_mapping[card['name']] else: card_data = card['name']", "in the given directory. Nested directories are not traversed. This is equivalent to", "given ID when parsing and used in place of the card prompt. :param", "of the trello board json file :param card_mapping: a mapping of card names", "next(action for action in valid_actions if action['type'] == 'createList') start_time = isoparse(first_list['date']) end_time", "= end_time - start_time # Empty groups are discarded. 
groups = [group for", "in valid_actions if action['type'] == 'createList') start_time = isoparse(first_list['date']) end_time = isoparse(actions[-1]['date']) total_sort_time", "x: x['pos']) # Cards are linked to their lists by list ID. So,", "a temporary mapping # from list IDs to groups is needed. groups_by_id =", "from list IDs to groups is needed. groups_by_id = {} for trello_list in", "and 'listBefore' in action_data: valid_actions.append(action) # List is created elif action_type == 'createList':", "when the # last card move or list rename action was performed. first_list", "trello_list['id'] group = Group(group_name) groups_by_id[list_id] = group cards = data['cards'] # Participants may", "utils.sorts import Sort, Group def parse_board(f: TextIO, card_mapping: dict[str, Hashable]) -> Sort: \"\"\"", "actions = data['actions'] actions.sort(key=lambda x: isoparse(x['date'])) # Only card moves, list creation, and", "information from a trello board json file. A card_mapping maps the card prompts", "a Sort object \"\"\" data = json.load(f) trello_lists = data['lists'] trello_lists.sort(key=lambda x: x['pos'])", "linked to their lists by list ID. So, a temporary mapping # from", "= card['idList'] group = groups_by_id[group_id] # It may be more useful to map", "group = groups_by_id[group_id] # It may be more useful to map card prompts", "the list of paths to json files in the given directory \"\"\" files", "cards, total_sort_time) return sort def get_paths_to_jsons_in_dir(path: str) -> Iterator[str]: \"\"\" Returns a list", "file in the given directory. :param path: a path to a directory :param", "considered. 
valid_actions = [] for action in actions: action_data = action['data'] action_type =", "json file :param card_mapping: a mapping of card names to card ids :return:", "sorts were considered to start when the # first trello list was created.", "action['type'] == 'createList') start_time = isoparse(first_list['date']) end_time = isoparse(actions[-1]['date']) total_sort_time = end_time -", "the given directory. :param path: a path to a directory :param card_mapping: an", "directory. :param path: a path to a directory :param card_mapping: an optional mapping", "renamed elif action_type == 'updateList' and 'name' in action_data['old']: valid_actions.append(action) # For the", "valid_actions.append(action) # List is created elif action_type == 'createList': valid_actions.append(action) # List is", "json files in the given directory. Nested directories are not traversed. :param path:", "== 'updateCard' and 'listBefore' in action_data: valid_actions.append(action) # List is created elif action_type", "group in groups_by_id.values() if group.cards] sort_name = data['name'] cards = set(card_mapping.values()) sort =", "from typing import TextIO, Hashable, Iterator from dateutil.parser import isoparse from utils.sorts import", "for group in groups_by_id.values() if group.cards] sort_name = data['name'] cards = set(card_mapping.values()) sort", "to a directory :param card_mapping: an optional mapping of card names to card", "performed. first_list = next(action for action in valid_actions if action['type'] == 'createList') start_time", "\"\"\" sorts = [] trello_json_paths = get_paths_to_jsons_in_dir(path) for path in trello_json_paths: with open(path,", "card in cards: group_id = card['idList'] group = groups_by_id[group_id] # It may be", "str) -> Iterator[str]: \"\"\" Returns a list of paths to json files in", "'updateList' and 'name' in action_data['old']: valid_actions.append(action) # For the purposes of this study,", "cards which are then deleted, \"closed\". 
cards = [card for card in cards", "of this study, sorts were considered to start when the # first trello", "List is created elif action_type == 'createList': valid_actions.append(action) # List is renamed elif", "the given directory. Nested directories are not traversed. This is equivalent to calling", "lists by list ID. So, a temporary mapping # from list IDs to", "groups_by_id = {} for trello_list in trello_lists: group_name = trello_list['name'] list_id = trello_list['id']", "card['closed']] for card in cards: group_id = card['idList'] group = groups_by_id[group_id] # It", "given directory \"\"\" files = os.listdir(path) for file in files: file_path = os.path.join(path,", "maps the card prompts to an ID which is usually more useful for", "data['actions'] actions.sort(key=lambda x: isoparse(x['date'])) # Only card moves, list creation, and list renaming", "card_data = card_mapping[card['name']] else: card_data = card['name'] group.cards.add(card_data) actions = data['actions'] actions.sort(key=lambda x:", "'listBefore' in action_data: valid_actions.append(action) # List is created elif action_type == 'createList': valid_actions.append(action)", ":param path: a path to a directory :return: the list of paths to", "may be more useful to map card prompts to an ID for analysis", "in place of the card prompt. :param f: a TextIO Stream of the", "considered to end when the # last card move or list rename action", "groups are discarded. groups = [group for group in groups_by_id.values() if group.cards] sort_name", "card move or list rename action was performed. first_list = next(action for action", "of Sort objects \"\"\" sorts = [] trello_json_paths = get_paths_to_jsons_in_dir(path) for path in", "a list of Sort objects \"\"\" sorts = [] trello_json_paths = get_paths_to_jsons_in_dir(path) for", "to their lists by list ID. So, a temporary mapping # from list", "a list of paths to json files in the given directory. Nested directories", "the given directory. 
Nested directories are not traversed. :param path: a path to", "of paths to json files in the given directory. Nested directories are not" ]
[ "_output(self, glyph, port=None): if port is None: output_port = self._output_port else: output_port =", "False class Random(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x,", "in \"channel\", \"octave\", \"note\": if self._grid.listen(self.ports[port_name]) == DOT_GLYPH: return note = self._grid.listen(self.ports[\"note\"]) if", "= self.y + offset_y if not self._grid.is_inside(new_x, new_y): self.explode() return collider = self._grid.peek(new_x,", "%d\", self.name, self.x, self.y, port.x, port.y, ) self._grid.lock(port.x, port.y) output_port = self._output_port if", "super().__init__( grid, x, y, \"south\", \"Move southward or bang\", glyph=\"s\", is_passive=is_passive, ) self.do_draw", "self.erase() class Comment(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x,", "return value == 0 or mod == 1 class East(IOperator): def __init__(self, grid,", "self._output_port else: output_port = port if output_port is None: logging.warn( \"No output port", "y + 1, is_bang=True), } ) def operation(self, frame, force=False): a = self._grid.listen(self.ports[\"a\"])", "= self._grid.listen(input_port) self._output(res, output_port) class Halt(IOperator): def __init__(self, grid, x, y, *, is_passive=False):", "is_passive=is_passive, ) self.do_draw = False def operation(self, frame, force=False): self._grid.lock(self.x, self.y) for x", "is_passive=True, ) self.ports.update( { \"channel\": InputPort(self.x + 1, self.y), \"octave\": InputPort( self.x +", "self.y += offset_y self._grid.poke(self.x, self.y, self.glyph) if self._grid.is_inside(self.x, self.y): self._grid.lock(self.x, self.y) @property def", "def operation(self, frame, force=False): key = self._grid.listen_as_value(self.ports[\"key\"]) length = self._grid.listen_as_value(self.ports[\"len\"]) for offset in", "self._grid.listen_as_value(self.ports[\"mod\"]) value = math.floor(frame / rate) % mod return self._grid.key_of(value) class 
Delay(IOperator): def", "False def operation(self, frame, force=False): self.move(0, -1) self.is_passive = False class Random(IOperator): def", "of the grid given at construction time.\"\"\" def __str__(self): return self.name def run(self,", "grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"bang\", \"Bangs neighboring operands\",", "y, \"multiply\", \"Output multiplication of inputs\", glyph=\"m\", is_passive=is_passive, ) self.ports.update( { \"a\": InputPort(x", "DOT_GLYPH): self.explode() return self.erase() self.x += offset_x self.y += offset_y self._grid.poke(self.x, self.y, self.glyph)", "from orca.ports import InputPort, OutputPort logger = logging.getLogger(__name__) OUTPUT_PORT_NAME = \"output\" class IOperator(abc.ABC):", "return True def _bang(self, payload): output_port = self._output_port if output_port is None: logger.warn(\"Trying", "+ 1, is_sensitive=True), } ) def operation(self, frame, force=False): step = self._grid.listen_as_value(self.ports[\"step\"]) mod", "self.y ) elif glyph is None: return else: if self._should_upper_case(): value = glyph.upper()", "OutputPort) and port.is_bang: continue logger.debug( \"Ops %s (%d, %d): locking port @ %d,", "InputPort(x - 2, y), \"len\": InputPort(x - 1, y, clamp=lambda x: max(1, x)),", "is_bang=True), } ) def operation(self, frame, force=False): rate = self._grid.listen_as_value(self.ports[\"rate\"]) mod = self._grid.listen_as_value(self.ports[\"mod\"])", "operands with offset\", glyph=\"g\", is_passive=is_passive, ) self.ports.update( { \"x\": InputPort(x - 3, y),", "x)), OUTPUT_PORT_NAME: OutputPort(x, y + 1), } ) def operation(self, frame, force=False): key", "import random from orca.grid import BANG_GLYPH, COMMENT_GLYPH, DOT_GLYPH, MidiNoteOnEvent from orca.ports import InputPort,", "= self._grid.listen_as_value(self.ports[\"a\"]) b = self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(abs(b - a)) class Clock(IOperator): def __init__(self,", "grid, x, y, *, 
is_passive=False): super().__init__( grid, x, y, \"midi\", \"Send MIDI note\",", ") def operation(self, frame, force=False): a = self._grid.listen_as_value(self.ports[\"a\"]) b = self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(a", "operation(self, frame, force=False): key = self._grid.listen_as_value(self.ports[\"key\"]) length = self._grid.listen_as_value(self.ports[\"len\"]) for offset in range(length):", "self.y, clamp=lambda x: min(max(0, x), 8) ), \"note\": InputPort(self.x + 3, self.y), \"velocity\":", "x), 8) ), \"note\": InputPort(self.x + 3, self.y), \"velocity\": InputPort( self.x + 4,", "= False def operation(self, frame, force=False): self.move(0, 1) self.is_passive = False class Track(IOperator):", "x, y, *, is_passive=False): super().__init__( grid, x, y, \"substract\", \"Output difference of inputs\",", "return self._grid.listen(self.ports[\"val\"]) class Multiply(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid,", "self._grid.peek(new_x, new_y) if collider not in (BANG_GLYPH, DOT_GLYPH): self.explode() return self.erase() self.x +=", "is_passive=is_passive, ) self.ports.update( { \"val\": InputPort(x, y - 1), OUTPUT_PORT_NAME: OutputPort(x, y +", "def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"south\", \"Move", "def operation(self, frame, force=False): a = self._grid.listen_as_value(self.ports[\"a\"]) b = self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(a *", "frame, force=False): self._grid.lock(self.x, self.y + 1) # self._output_port.x, self._output_port.y) class If(IOperator): def __init__(self,", "= False def operation(self, frame, force=False): self.move(1, 0) self.is_passive = False class Generator(IOperator):", "1)), } ) def operation(self, frame, force=False): length = self._grid.listen_as_value(self.ports[\"len\"]) x = self._grid.listen_as_value(self.ports[\"x\"])", "force=False): \"\"\"Run the operator for the given 
frame and return the payload. This", "x, y, \"south\", \"Move southward or bang\", glyph=\"s\", is_passive=is_passive, ) self.do_draw = False", "is_passive=is_passive, ) self.ports.update( { \"step\": InputPort(x - 1, y, default=\"1\"), \"mod\": InputPort(x +", "None: output_port = self._output_port else: output_port = port if output_port is None: logging.warn(", "-1) self.is_passive = False class Random(IOperator): def __init__(self, grid, x, y, *, is_passive=False):", "force=False): payload = self.operation(frame, force) for port in self.ports.values(): if isinstance(port, OutputPort) and", "+ self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(index) class Substract(IOperator): def __init__(self, grid, x, y, *, is_passive=False):", "True return False def move(self, offset_x, offset_y): new_x = self.x + offset_x new_y", "if inputs are equal\", glyph=\"f\", is_passive=is_passive, ) self.ports.update( { \"a\": InputPort(x - 1,", "self._should_upper_case(): value = glyph.upper() else: value = glyph self._grid.poke(output_port.x, output_port.y, value) class Add(IOperator):", "\"F\", \"f\", \"G\", \"g\", \"A\", \"a\", \"B\") NOTE_TO_INDEX = {k: i for i,", "b = self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(a * b) class North(IOperator): def __init__(self, grid, x,", "in range(length): self._grid.lock(self.x + offset + 1, self.y) port = InputPort(self.x + 1", "OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_bang=True), } ) def operation(self, frame, force=False): a", "grid, x, y, \"north\", \"Move northward or bang\", glyph=\"n\", is_passive=is_passive, ) self.do_draw =", "grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"south\", \"Move southward or", "def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"random\", \"Outputs", "\"length\": InputPort( self.x + 5, self.y, clamp=lambda x: min(max(0, x), 32) ), }", "value.upper() or value.upper() != value: return False else: return True 
def _bang(self, payload):", "== 1 class East(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid,", "\"Halts southward operator\", glyph=\"h\", is_passive=is_passive, ) def operation(self, frame, force=False): self._grid.lock(self.x, self.y +", "/ rate) % mod return self._grid.key_of(value) class Delay(IOperator): def __init__(self, grid, x, y,", "explode(self): self._grid.poke(self.x, self.y, BANG_GLYPH) def has_neighbor(self, glyph): for x, y in ((-1, 0),", "operation(self, frame, force=False): length = self._grid.listen_as_value(self.ports[\"len\"]) x = self._grid.listen_as_value(self.ports[\"x\"]) y = self._grid.listen_as_value(self.ports[\"y\"]) +", "grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"increment\", \"Increment operator southward\",", "elif glyph is None: return else: if self._should_upper_case(): value = glyph.upper() else: value", "grid, x, y, \"midi\", \"Send MIDI note\", glyph=\":\", is_passive=True, ) self.ports.update( { \"channel\":", "1) self.is_passive = False class Track(IOperator): def __init__(self, grid, x, y, *, is_passive=False):", "channel = self._grid.listen_as_value(self.ports[\"channel\"]) if channel > 15: return octave = self._grid.listen_as_value(self.ports[\"octave\"]) velocity =", "- 3, y), \"y\": InputPort(x - 2, y), \"len\": InputPort(x - 1, y,", "InputPort(x - 1, y, clamp=lambda x: max(1, x)), \"mod\": InputPort(x + 1, y,", "OutputPort logger = logging.getLogger(__name__) OUTPUT_PORT_NAME = \"output\" class IOperator(abc.ABC): def __init__( self, grid,", "value = glyph self._grid.poke(output_port.x, output_port.y, value) class Add(IOperator): def __init__(self, grid, x, y,", "self._grid.listen_as_value(self.ports[\"y\"]) + 1 for offset in range(length): input_port = InputPort(self.x + offset +", "= BANG_GLYPH if payload else DOT_GLYPH self._grid.poke(output_port.x, output_port.y, glyph) def _output(self, glyph, port=None):", "grid, x, y, *, is_passive=False): 
super().__init__( grid, x, y, \"north\", \"Move northward or", "self._grid.poke(output_port.x, output_port.y, glyph) def _output(self, glyph, port=None): if port is None: output_port =", "0 or mod == 1 class East(IOperator): def __init__(self, grid, x, y, *,", "is_passive=False): super().__init__( grid, x, y, \"midi\", \"Send MIDI note\", glyph=\":\", is_passive=True, ) self.ports.update(", "the grid given at construction time.\"\"\" def __str__(self): return self.name def run(self, frame,", "low = self._grid.listen_as_value(self.ports[\"min\"]) high = self._grid.listen_as_value(self.ports[\"max\"]) value = random.randint(low, high) return self._grid.key_of(value) class", "def operation(self, frame, force=False): \"\"\"Run the operator for the given frame and return", "is_passive=False): super().__init__( grid, x, y, \"generator\", \"Write operands with offset\", glyph=\"g\", is_passive=is_passive, )", "__init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"comment\", \"Halts line\",", "frame, force=False): if not self.has_neighbor(BANG_GLYPH) and not force: return for port_name in \"channel\",", "x, y, *, is_passive=False): super().__init__( grid, x, y, \"west\", \"Move westward or bang\",", "+ 1, self.y) value = self._grid.listen(right_port) if value.lower() == value.upper() or value.upper() !=", "{k: i for i, k in enumerate(_NOTES_VALUES)} class Midi(IOperator): def __init__(self, grid, x,", "if self._should_upper_case(): value = glyph.upper() else: value = glyph self._grid.poke(output_port.x, output_port.y, value) class", "self._grid.lock(self.x, self.y + 1) # self._output_port.x, self._output_port.y) class If(IOperator): def __init__(self, grid, x,", "input_port, f\"output{offset}\": output_port, } ) res = self._grid.listen(input_port) self._output(res, output_port) class Halt(IOperator): def", "OutputPort(x, y + 1, is_sensitive=True), } ) def operation(self, frame, force=False): step =", ") self.ports.update( { \"channel\": 
InputPort(self.x + 1, self.y), \"octave\": InputPort( self.x + 2,", "def operation(self, frame, force=False): self._grid.lock(self.x, self.y) for x in range(self.x + 1, self._grid.cols):", "1 + key % length, self.y) return self._grid.listen(port) class West(IOperator): def __init__(self, grid,", "self.do_draw = False def operation(self, frame, force=False): self.move(-1, 0) self.is_passive = False class", "if output_port is None or not output_port.is_sensitive: return False else: right_port = InputPort(self.x", "output_port is None or not output_port.is_sensitive: return False else: right_port = InputPort(self.x +", "grid given at construction time.\"\"\" def __str__(self): return self.name def run(self, frame, force=False):", "output_port = self._output_port if output_port: if output_port.is_bang: self._bang(payload) else: self._output(payload) def erase(self): self._grid.poke(self.x,", "return for port_name in \"channel\", \"octave\", \"note\": if self._grid.listen(self.ports[port_name]) == DOT_GLYPH: return note", "self._grid.poke(self.x, self.y, self.glyph) if self._grid.is_inside(self.x, self.y): self._grid.lock(self.x, self.y) @property def _output_port(self): return self.ports.get(OUTPUT_PORT_NAME)", "range(self.x + 1, self._grid.cols): self._grid.lock(x, self.y) if self._grid.peek(x, self.y) == self.glyph: break _NOTES_VALUES", "DOT_GLYPH: return note = self._grid.listen(self.ports[\"note\"]) if not NOTE_TO_INDEX: return channel = self._grid.listen_as_value(self.ports[\"channel\"]) if", "self.explode() return self.erase() self.x += offset_x self.y += offset_y self._grid.poke(self.x, self.y, self.glyph) if", "def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"delay\", \"Bangs", "y, default=\"8\"), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True), } ) def operation(self, frame,", "= self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(a * b) class North(IOperator): def __init__(self, grid, x, y,", 
"grid, x, y, \"j\", \"Outputs northward operator\", glyph=\"f\", is_passive=is_passive, ) self.ports.update( { \"val\":", "\"y\": InputPort(x - 2, y), \"len\": InputPort(x - 1, y, clamp=lambda x: max(x,", "in ((-1, 0), (1, 0), (0, -1), (0, 1)): if self._grid.peek(self.x + x,", "self.do_draw = False def operation(self, frame, force=False): self.do_draw = False self.erase() class Comment(IOperator):", "y), \"max\": InputPort(x + 1, y), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True), }", "or bang\", glyph=\"e\", is_passive=is_passive, ) self.do_draw = False def operation(self, frame, force=False): self.move(1,", "y, \"bang\", \"Bangs neighboring operands\", glyph=BANG_GLYPH, is_passive=is_passive, ) self.do_draw = False def operation(self,", "y - 1), OUTPUT_PORT_NAME: OutputPort(x, y + 1), } ) def operation(self, frame,", "self.ports.update( { \"val\": InputPort(x - 1, y), OUTPUT_PORT_NAME: OutputPort(x + 1, y), }", "operation(self, frame, force=False): if not self.has_neighbor(BANG_GLYPH) and not force: return for port_name in", "self.y + offset_y if not self._grid.is_inside(new_x, new_y): self.explode() return collider = self._grid.peek(new_x, new_y)", "for port in self.ports.values(): if isinstance(port, OutputPort) and port.is_bang: continue logger.debug( \"Ops %s", "return False def move(self, offset_x, offset_y): new_x = self.x + offset_x new_y =", "grid, x, y, \"clock\", \"Outputs modulo of frame\", glyph=\"c\", is_passive=is_passive, ) self.ports.update( {", "\"Move eastwards or bang\", glyph=\"e\", is_passive=is_passive, ) self.do_draw = False def operation(self, frame,", "else DOT_GLYPH self._grid.poke(output_port.x, output_port.y, glyph) def _output(self, glyph, port=None): if port is None:", "offset_x, offset_y): new_x = self.x + offset_x new_y = self.y + offset_y if", "logging.getLogger(__name__) OUTPUT_PORT_NAME = \"output\" class IOperator(abc.ABC): def __init__( self, grid, x, y, name,", "y), OUTPUT_PORT_NAME: OutputPort(x, y + 1, 
is_sensitive=True), } ) def operation(self, frame, force=False):", "glyph.upper() if is_passive else glyph @abc.abstractmethod def operation(self, frame, force=False): \"\"\"Run the operator", "None: logger.warn(\"Trying to bang, but no output port.\") return else: glyph = BANG_GLYPH", "operation(self, frame, force=False): self.move(-1, 0) self.is_passive = False class Jymper(IOperator): def __init__(self, grid,", "\"rate\": InputPort(x - 1, y, clamp=lambda x: max(1, x)), \"mod\": InputPort(x + 1,", "frame, force=False): self.move(1, 0) self.is_passive = False class Generator(IOperator): def __init__(self, grid, x,", "if output_port is None: logger.warn(\"Trying to bang, but no output port.\") return else:", "self._grid.listen_as_value( self.ports[\"a\"] ) + self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(index) class Substract(IOperator): def __init__(self, grid, x,", "mod = self._grid.listen_as_value(self.ports[\"mod\"]) value = frame % (mod * rate) return value ==", "glyph=\"i\", is_passive=is_passive, ) self.ports.update( { \"step\": InputPort(x - 1, y, default=\"1\"), \"mod\": InputPort(x", "def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"track\", \"Reads", "def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"multiply\", \"Output", "grid, x, y, \"increment\", \"Increment operator southward\", glyph=\"i\", is_passive=is_passive, ) self.ports.update( { \"step\":", "InputPort(x - 1, y), \"max\": InputPort(x + 1, y), OUTPUT_PORT_NAME: OutputPort(x, y +", "*, is_passive=False): super().__init__( grid, x, y, \"west\", \"Move westward or bang\", glyph=\"w\", is_passive=is_passive,", "False else: return True def _bang(self, payload): output_port = self._output_port if output_port is", "random value\", glyph=\"r\", is_passive=is_passive, ) self.ports.update( { \"min\": InputPort(x - 1, y), \"max\":", "1)): if self._grid.peek(self.x + x, self.y + y) == glyph: return True 
return", "x, y, *, is_passive=False): super().__init__( grid, x, y, \"bang\", \"Bangs neighboring operands\", glyph=BANG_GLYPH,", "(BANG_GLYPH, DOT_GLYPH): self.explode() return self.erase() self.x += offset_x self.y += offset_y self._grid.poke(self.x, self.y,", "output_port is None: logger.warn(\"Trying to bang, but no output port.\") return else: glyph", "self._output_port if output_port is None or not output_port.is_sensitive: return False else: right_port =", "y), \"y\": InputPort(x - 2, y), \"len\": InputPort(x - 1, y, clamp=lambda x:", "is_passive=is_passive, ) self.ports.update( { \"min\": InputPort(x - 1, y), \"max\": InputPort(x + 1,", ") def operation(self, frame, force=False): self._grid.lock(self.x, self.y + 1) # self._output_port.x, self._output_port.y) class", "description self.ports = {} self._grid = grid self.is_passive = is_passive self.do_draw = is_passive", "new_y): self.explode() return collider = self._grid.peek(new_x, new_y) if collider not in (BANG_GLYPH, DOT_GLYPH):", "of inputs\", glyph=\"m\", is_passive=is_passive, ) self.ports.update( { \"a\": InputPort(x - 1, y), \"b\":", "= (\"C\", \"c\", \"D\", \"d\", \"E\", \"F\", \"f\", \"G\", \"g\", \"A\", \"a\", \"B\")", "{ \"val\": InputPort(x - 1, y), OUTPUT_PORT_NAME: OutputPort(x + 1, y), } )", "+ 1, y), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True), } ) def operation(self,", "+ 1) # self._output_port.x, self._output_port.y) class If(IOperator): def __init__(self, grid, x, y, *,", "is_passive=False): super().__init__( grid, x, y, \"y\", \"Outputs westward operator\", glyph=\"y\", is_passive=is_passive, ) self.ports.update(", "port.is_bang: continue logger.debug( \"Ops %s (%d, %d): locking port @ %d, %d\", self.name,", "force=False): self._grid.lock(self._output_port.x, self._output_port.y) return self._grid.listen(self.ports[\"val\"]) class Multiply(IOperator): def __init__(self, grid, x, y, *,", "frame, force=False): key = self._grid.listen_as_value(self.ports[\"key\"]) length = 
self._grid.listen_as_value(self.ports[\"len\"]) for offset in range(length): self._grid.lock(self.x", "inputs\", glyph=\"b\", is_passive=is_passive, ) self.ports.update( { \"a\": InputPort(x - 1, y), \"b\": InputPort(x", "(1, 0), (0, -1), (0, 1)): if self._grid.peek(self.x + x, self.y + y)", "operator southward\", glyph=\"i\", is_passive=is_passive, ) self.ports.update( { \"step\": InputPort(x - 1, y, default=\"1\"),", "y, *, is_passive=False): super().__init__( grid, x, y, \"multiply\", \"Output multiplication of inputs\", glyph=\"m\",", "= glyph self._grid.poke(output_port.x, output_port.y, value) class Add(IOperator): def __init__(self, grid, x, y, *,", "x, y, \"if\", \"Bang if inputs are equal\", glyph=\"f\", is_passive=is_passive, ) self.ports.update( {", "name, description, *, glyph=DOT_GLYPH, is_passive=False ): self.x = x self.y = y self.name", "def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"west\", \"Move", "return self.erase() self.x += offset_x self.y += offset_y self._grid.poke(self.x, self.y, self.glyph) if self._grid.is_inside(self.x,", "glyph=\"c\", is_passive=is_passive, ) self.ports.update( { \"rate\": InputPort(x - 1, y, clamp=lambda x: max(1,", "y, \"random\", \"Outputs random value\", glyph=\"r\", is_passive=is_passive, ) self.ports.update( { \"min\": InputPort(x -", "southward\", glyph=\"i\", is_passive=is_passive, ) self.ports.update( { \"step\": InputPort(x - 1, y, default=\"1\"), \"mod\":", "x, y, *, is_passive=False): super().__init__( grid, x, y, \"increment\", \"Increment operator southward\", glyph=\"i\",", "else: value = glyph self._grid.poke(output_port.x, output_port.y, value) class Add(IOperator): def __init__(self, grid, x,", "MidiNoteOnEvent from orca.ports import InputPort, OutputPort logger = logging.getLogger(__name__) OUTPUT_PORT_NAME = \"output\" class", "y), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_bang=True), } ) def operation(self, frame, force=False):", "= InputPort(self.x + 1, 
self.y) value = self._grid.listen(right_port) if value.lower() == value.upper() or", "- 1, y, clamp=lambda x: max(1, x)), OUTPUT_PORT_NAME: OutputPort(x, y + 1), }", "self._grid.listen_as_value(self.ports[\"key\"]) length = self._grid.listen_as_value(self.ports[\"len\"]) for offset in range(length): self._grid.lock(self.x + offset + 1,", "return self.name def run(self, frame, force=False): payload = self.operation(frame, force) for port in", "0) self.is_passive = False class Jymper(IOperator): def __init__(self, grid, x, y, *, is_passive=False):", "clamp=lambda x: min(max(0, x), 8) ), \"note\": InputPort(self.x + 3, self.y), \"velocity\": InputPort(", "y, *, is_passive=False): super().__init__( grid, x, y, \"midi\", \"Send MIDI note\", glyph=\":\", is_passive=True,", "assumed to match the state of the grid given at construction time.\"\"\" def", "i for i, k in enumerate(_NOTES_VALUES)} class Midi(IOperator): def __init__(self, grid, x, y,", "y, *, is_passive=False): super().__init__( grid, x, y, \"west\", \"Move westward or bang\", glyph=\"w\",", "y + 1), } ) def operation(self, frame, force=False): key = self._grid.listen_as_value(self.ports[\"key\"]) length", "self._grid.listen_as_value(self.ports[\"max\"]) value = random.randint(low, high) return self._grid.key_of(value) class South(IOperator): def __init__(self, grid, x,", "operator\", glyph=\"h\", is_passive=is_passive, ) def operation(self, frame, force=False): self._grid.lock(self.x, self.y + 1) #", "northward operator\", glyph=\"f\", is_passive=is_passive, ) self.ports.update( { \"val\": InputPort(x, y - 1), OUTPUT_PORT_NAME:", "*, is_passive=False): super().__init__( grid, x, y, \"j\", \"Outputs northward operator\", glyph=\"f\", is_passive=is_passive, )", "output_port.is_bang: self._bang(payload) else: self._output(payload) def erase(self): self._grid.poke(self.x, self.y, DOT_GLYPH) def explode(self): self._grid.poke(self.x, self.y,", "+ 1, self.y), \"octave\": InputPort( self.x + 2, self.y, clamp=lambda 
x: min(max(0, x),", "clamp=lambda x: min(max(0, x), 32) ), } ) def operation(self, frame, force=False): if", "class Jumper(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y,", "= False class Track(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid,", "self.x, self.y, port.x, port.y, ) self._grid.lock(port.x, port.y) output_port = self._output_port if output_port: if", "1, is_sensitive=True), } ) def operation(self, frame, force=False): index = self._grid.listen_as_value( self.ports[\"a\"] )", "= InputPort(self.x + 1 + key % length, self.y) return self._grid.listen(port) class West(IOperator):", "is_passive=False): super().__init__( grid, x, y, \"substract\", \"Output difference of inputs\", glyph=\"b\", is_passive=is_passive, )", "*, is_passive=False): super().__init__( grid, x, y, \"bang\", \"Bangs neighboring operands\", glyph=BANG_GLYPH, is_passive=is_passive, )", "operation(self, frame, force=False): self.move(0, -1) self.is_passive = False class Random(IOperator): def __init__(self, grid,", ") res = self._grid.listen(input_port) self._output(res, output_port) class Halt(IOperator): def __init__(self, grid, x, y,", "self._grid.listen_as_value(self.ports[\"rate\"]) mod = self._grid.listen_as_value(self.ports[\"mod\"]) value = math.floor(frame / rate) % mod return self._grid.key_of(value)", "logger.warn(\"Trying to bang, but no output port.\") return else: glyph = BANG_GLYPH if", "class East(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y,", "%s @ (%d, %d)\", self.name, self.x, self.y ) elif glyph is None: return", "self._grid.key_of(value) class South(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x,", "% (mod if mod > 0 else 36)) class Jumper(IOperator): def __init__(self, grid,", "\"generator\", \"Write operands with offset\", glyph=\"g\", is_passive=is_passive, ) self.ports.update( { \"x\": 
InputPort(x -", "self.y), \"octave\": InputPort( self.x + 2, self.y, clamp=lambda x: min(max(0, x), 8) ),", "grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"substract\", \"Output difference of", "if port is None: output_port = self._output_port else: output_port = port if output_port", "of frame\", glyph=\"d\", is_passive=is_passive, ) self.ports.update( { \"rate\": InputPort(x - 1, y, clamp=lambda", "orca.ports import InputPort, OutputPort logger = logging.getLogger(__name__) OUTPUT_PORT_NAME = \"output\" class IOperator(abc.ABC): def", "is_passive=False): super().__init__( grid, x, y, \"multiply\", \"Output multiplication of inputs\", glyph=\"m\", is_passive=is_passive, )", "5, self.y, clamp=lambda x: min(max(0, x), 32) ), } ) def operation(self, frame,", "1, y, clamp=lambda x: max(1, x)), OUTPUT_PORT_NAME: OutputPort(x, y + 1), } )", "self.ports = {} self._grid = grid self.is_passive = is_passive self.do_draw = is_passive self.glyph", "is_passive=is_passive, ) self.do_draw = False def operation(self, frame, force=False): self.move(0, -1) self.is_passive =", "South(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"south\",", "is_passive=False): super().__init__( grid, x, y, \"increment\", \"Increment operator southward\", glyph=\"i\", is_passive=is_passive, ) self.ports.update(", "1, is_sensitive=True), } ) def operation(self, frame, force=False): low = self._grid.listen_as_value(self.ports[\"min\"]) high =", "self._output_port if output_port is None: logger.warn(\"Trying to bang, but no output port.\") return", "grid, x, y, \"multiply\", \"Output multiplication of inputs\", glyph=\"m\", is_passive=is_passive, ) self.ports.update( {", "def operation(self, frame, force=False): self.move(0, 1) self.is_passive = False class Track(IOperator): def __init__(self,", "+ offset + 1, self.y) port = InputPort(self.x + 1 + key %", "self.y) return self._grid.listen(port) class West(IOperator): def __init__(self, 
grid, x, y, *, is_passive=False): super().__init__(", "} ) def operation(self, frame, force=False): rate = self._grid.listen_as_value(self.ports[\"rate\"]) mod = self._grid.listen_as_value(self.ports[\"mod\"]) value", "glyph=BANG_GLYPH, is_passive=is_passive, ) self.do_draw = False def operation(self, frame, force=False): self.do_draw = False", "Multiply(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"multiply\",", "return True return False def move(self, offset_x, offset_y): new_x = self.x + offset_x", "+ step) % (mod if mod > 0 else 36)) class Jumper(IOperator): def", "grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"west\", \"Move westward or", "def operation(self, frame, force=False): rate = self._grid.listen_as_value(self.ports[\"rate\"]) mod = self._grid.listen_as_value(self.ports[\"mod\"]) value = math.floor(frame", "frame, force=False): length = self._grid.listen_as_value(self.ports[\"len\"]) x = self._grid.listen_as_value(self.ports[\"x\"]) y = self._grid.listen_as_value(self.ports[\"y\"]) + 1", "> 15: return octave = self._grid.listen_as_value(self.ports[\"octave\"]) velocity = self._grid.listen_as_value(self.ports[\"velocity\"]) length = self._grid.listen_as_value(self.ports[\"length\"]) self._grid.push_midi(MidiNoteOnEvent(channel,", "port.y, ) self._grid.lock(port.x, port.y) output_port = self._output_port if output_port: if output_port.is_bang: self._bang(payload) else:", "the given frame and return the payload. This may modify the grid. 
Note:", "% mod return self._grid.key_of(value) class Delay(IOperator): def __init__(self, grid, x, y, *, is_passive=False):", "in self.ports.values(): if isinstance(port, OutputPort) and port.is_bang: continue logger.debug( \"Ops %s (%d, %d):", ") self.do_draw = False def operation(self, frame, force=False): self.move(0, -1) self.is_passive = False", "self._grid.listen(input_port) self._output(res, output_port) class Halt(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__(", "of inputs\", glyph=\"a\", is_passive=is_passive ) self.ports.update( { \"a\": InputPort(x - 1, y), \"b\":", "port=None): if port is None: output_port = self._output_port else: output_port = port if", "def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"y\", \"Outputs", "__init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"half\", \"Halts southward", "if output_port: if output_port.is_bang: self._bang(payload) else: self._output(payload) def erase(self): self._grid.poke(self.x, self.y, DOT_GLYPH) def", "(0, -1), (0, 1)): if self._grid.peek(self.x + x, self.y + y) == glyph:", "def has_neighbor(self, glyph): for x, y in ((-1, 0), (1, 0), (0, -1),", "+ 1), } ) def operation(self, frame, force=False): self._grid.lock(self._output_port.x, self._output_port.y) return self._grid.listen(self.ports[\"val\"]) class", "def operation(self, frame, force=False): self.move(-1, 0) self.is_passive = False class Jymper(IOperator): def __init__(self,", "__init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"if\", \"Bang if", "port.y) output_port = self._output_port if output_port: if output_port.is_bang: self._bang(payload) else: self._output(payload) def erase(self):", "grid, x, y, \"add\", \"Output sum of inputs\", glyph=\"a\", is_passive=is_passive ) self.ports.update( {", "y, \"track\", \"Reads eastward operand\", glyph=\"t\", is_passive=is_passive, ) self.ports.update( { \"key\": InputPort(x 
-", "x, y, \"comment\", \"Halts line\", glyph=COMMENT_GLYPH, is_passive=is_passive, ) self.do_draw = False def operation(self,", "port.x, port.y, ) self._grid.lock(port.x, port.y) output_port = self._output_port if output_port: if output_port.is_bang: self._bang(payload)", "or value.upper() != value: return False else: return True def _bang(self, payload): output_port", "glyph.upper() else: value = glyph self._grid.poke(output_port.x, output_port.y, value) class Add(IOperator): def __init__(self, grid,", "of frame\", glyph=\"c\", is_passive=is_passive, ) self.ports.update( { \"rate\": InputPort(x - 1, y, clamp=lambda", "return else: glyph = BANG_GLYPH if payload else DOT_GLYPH self._grid.poke(output_port.x, output_port.y, glyph) def", "NOTE_TO_INDEX = {k: i for i, k in enumerate(_NOTES_VALUES)} class Midi(IOperator): def __init__(self,", "port_name in \"channel\", \"octave\", \"note\": if self._grid.listen(self.ports[port_name]) == DOT_GLYPH: return note = self._grid.listen(self.ports[\"note\"])", "self._grid.listen_as_value(self.ports[\"x\"]) y = self._grid.listen_as_value(self.ports[\"y\"]) + 1 for offset in range(length): input_port = InputPort(self.x", "output port for operator %s @ (%d, %d)\", self.name, self.x, self.y ) elif", ") self.ports.update( { \"step\": InputPort(x - 1, y, default=\"1\"), \"mod\": InputPort(x + 1,", "\"\"\"Run the operator for the given frame and return the payload. 
This may", "\"octave\": InputPort( self.x + 2, self.y, clamp=lambda x: min(max(0, x), 8) ), \"note\":", "has_neighbor(self, glyph): for x, y in ((-1, 0), (1, 0), (0, -1), (0,", "y) == glyph: return True return False def move(self, offset_x, offset_y): new_x =", "output_port = self._output_port if output_port is None or not output_port.is_sensitive: return False else:", ") + self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(index) class Substract(IOperator): def __init__(self, grid, x, y, *,", "else: output_port = port if output_port is None: logging.warn( \"No output port for", "force=False): self.move(-1, 0) self.is_passive = False class Jymper(IOperator): def __init__(self, grid, x, y,", "operation(self, frame, force=False): self._grid.lock(self.x, self.y) for x in range(self.x + 1, self._grid.cols): self._grid.lock(x,", "= self.x + offset_x new_y = self.y + offset_y if not self._grid.is_inside(new_x, new_y):", "None or not output_port.is_sensitive: return False else: right_port = InputPort(self.x + 1, self.y)", "return a == b class Increment(IOperator): def __init__(self, grid, x, y, *, is_passive=False):", "= self._grid.listen_as_value(self.ports[\"step\"]) mod = self._grid.listen_as_value(self.ports[\"mod\"]) out = self._grid.listen_as_value(self.ports[OUTPUT_PORT_NAME]) return self._grid.key_of((out + step) %", "frame, force=False): a = self._grid.listen_as_value(self.ports[\"a\"]) b = self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(abs(b - a)) class", "class IOperator(abc.ABC): def __init__( self, grid, x, y, name, description, *, glyph=DOT_GLYPH, is_passive=False", "self.description = description self.ports = {} self._grid = grid self.is_passive = is_passive self.do_draw", "\"Move southward or bang\", glyph=\"s\", is_passive=is_passive, ) self.do_draw = False def operation(self, frame,", "\"midi\", \"Send MIDI note\", glyph=\":\", is_passive=True, ) self.ports.update( { \"channel\": InputPort(self.x + 1,", 
"self.is_passive = False class Generator(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__(", "super().__init__( grid, x, y, \"west\", \"Move westward or bang\", glyph=\"w\", is_passive=is_passive, ) self.do_draw", "y, \"generator\", \"Write operands with offset\", glyph=\"g\", is_passive=is_passive, ) self.ports.update( { \"x\": InputPort(x", "return self._grid.listen(port) class West(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid,", "is_passive=is_passive, ) self.do_draw = False def operation(self, frame, force=False): self.move(-1, 0) self.is_passive =", "is None or not output_port.is_sensitive: return False else: right_port = InputPort(self.x + 1,", "x, y, *, is_passive=False): super().__init__( grid, x, y, \"delay\", \"Bangs on module of", "y in ((-1, 0), (1, 0), (0, -1), (0, 1)): if self._grid.peek(self.x +", "+ offset_x new_y = self.y + offset_y if not self._grid.is_inside(new_x, new_y): self.explode() return", "x + offset, self.y + y) self.ports.update( { f\"input{offset}\": input_port, f\"output{offset}\": output_port, }", "y, *, is_passive=False): super().__init__( grid, x, y, \"generator\", \"Write operands with offset\", glyph=\"g\",", "x, y, \"midi\", \"Send MIDI note\", glyph=\":\", is_passive=True, ) self.ports.update( { \"channel\": InputPort(self.x", "operator for the given frame and return the payload. 
This may modify the", "or bang\", glyph=\"w\", is_passive=is_passive, ) self.do_draw = False def operation(self, frame, force=False): self.move(-1,", "force=False): low = self._grid.listen_as_value(self.ports[\"min\"]) high = self._grid.listen_as_value(self.ports[\"max\"]) value = random.randint(low, high) return self._grid.key_of(value)", "*, is_passive=False): super().__init__( grid, x, y, \"add\", \"Output sum of inputs\", glyph=\"a\", is_passive=is_passive", "y + 1, is_sensitive=True), } ) def operation(self, frame, force=False): rate = self._grid.listen_as_value(self.ports[\"rate\"])", "a = self._grid.listen_as_value(self.ports[\"a\"]) b = self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(a * b) class North(IOperator): def", "return octave = self._grid.listen_as_value(self.ports[\"octave\"]) velocity = self._grid.listen_as_value(self.ports[\"velocity\"]) length = self._grid.listen_as_value(self.ports[\"length\"]) self._grid.push_midi(MidiNoteOnEvent(channel, octave, note,", "= logging.getLogger(__name__) OUTPUT_PORT_NAME = \"output\" class IOperator(abc.ABC): def __init__( self, grid, x, y,", "force) for port in self.ports.values(): if isinstance(port, OutputPort) and port.is_bang: continue logger.debug( \"Ops", "1, y, default=\"1\"), \"mod\": InputPort(x + 1, y), OUTPUT_PORT_NAME: OutputPort(x, y + 1,", "frame, force=False): \"\"\"Run the operator for the given frame and return the payload.", "x, y, *, is_passive=False): super().__init__( grid, x, y, \"if\", \"Bang if inputs are", "frame, force=False): payload = self.operation(frame, force) for port in self.ports.values(): if isinstance(port, OutputPort)", "rate = self._grid.listen_as_value(self.ports[\"rate\"]) mod = self._grid.listen_as_value(self.ports[\"mod\"]) value = frame % (mod * rate)", "or bang\", glyph=\"n\", is_passive=is_passive, ) self.do_draw = False def operation(self, frame, force=False): self.move(0,", "x, y, *, is_passive=False): super().__init__( grid, x, y, \"y\", 
\"Outputs westward operator\", glyph=\"y\",", "+ offset_y if not self._grid.is_inside(new_x, new_y): self.explode() return collider = self._grid.peek(new_x, new_y) if", "Generator(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"generator\",", "{ \"step\": InputPort(x - 1, y, default=\"1\"), \"mod\": InputPort(x + 1, y), OUTPUT_PORT_NAME:", "x, y, *, is_passive=False): super().__init__( grid, x, y, \"clock\", \"Outputs modulo of frame\",", "), \"note\": InputPort(self.x + 3, self.y), \"velocity\": InputPort( self.x + 4, self.y, default=\"f\",", ") def operation(self, frame, force=False): rate = self._grid.listen_as_value(self.ports[\"rate\"]) mod = self._grid.listen_as_value(self.ports[\"mod\"]) value =", "x, y, *, is_passive=False): super().__init__( grid, x, y, \"multiply\", \"Output multiplication of inputs\",", "= self._grid.listen_as_value(self.ports[\"max\"]) value = random.randint(low, high) return self._grid.key_of(value) class South(IOperator): def __init__(self, grid,", "self.ports.get(OUTPUT_PORT_NAME) def _has_output_port(self): return OUTPUT_PORT_NAME in self.ports def _should_upper_case(self): output_port = self._output_port if", "port in self.ports.values(): if isinstance(port, OutputPort) and port.is_bang: continue logger.debug( \"Ops %s (%d,", "def operation(self, frame, force=False): step = self._grid.listen_as_value(self.ports[\"step\"]) mod = self._grid.listen_as_value(self.ports[\"mod\"]) out = self._grid.listen_as_value(self.ports[OUTPUT_PORT_NAME])", "def operation(self, frame, force=False): self.move(1, 0) self.is_passive = False class Generator(IOperator): def __init__(self,", "self.y) if self._grid.peek(x, self.y) == self.glyph: break _NOTES_VALUES = (\"C\", \"c\", \"D\", \"d\",", "super().__init__( grid, x, y, \"increment\", \"Increment operator southward\", glyph=\"i\", is_passive=is_passive, ) self.ports.update( {", "grid, x, y, *, is_passive=False): super().__init__( grid, x, y, 
\"comment\", \"Halts line\", glyph=COMMENT_GLYPH,", "y, *, is_passive=False): super().__init__( grid, x, y, \"if\", \"Bang if inputs are equal\",", "self.do_draw = False def operation(self, frame, force=False): self.move(0, 1) self.is_passive = False class", "glyph=\"t\", is_passive=is_passive, ) self.ports.update( { \"key\": InputPort(x - 2, y), \"len\": InputPort(x -", "if isinstance(port, OutputPort) and port.is_bang: continue logger.debug( \"Ops %s (%d, %d): locking port", "glyph=\":\", is_passive=True, ) self.ports.update( { \"channel\": InputPort(self.x + 1, self.y), \"octave\": InputPort( self.x", "modify the grid. Note: the frame is assumed to match the state of", "value = random.randint(low, high) return self._grid.key_of(value) class South(IOperator): def __init__(self, grid, x, y,", "InputPort(x, y - 1), OUTPUT_PORT_NAME: OutputPort(x, y + 1), } ) def operation(self,", "glyph self._grid.poke(output_port.x, output_port.y, value) class Add(IOperator): def __init__(self, grid, x, y, *, is_passive=False):", "frame, force=False): self.move(0, -1) self.is_passive = False class Random(IOperator): def __init__(self, grid, x,", "super().__init__( grid, x, y, \"clock\", \"Outputs modulo of frame\", glyph=\"c\", is_passive=is_passive, ) self.ports.update(", "1), } ) def operation(self, frame, force=False): key = self._grid.listen_as_value(self.ports[\"key\"]) length = self._grid.listen_as_value(self.ports[\"len\"])", "in enumerate(_NOTES_VALUES)} class Midi(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid,", "if not self._grid.is_inside(new_x, new_y): self.explode() return collider = self._grid.peek(new_x, new_y) if collider not", "OUTPUT_PORT_NAME = \"output\" class IOperator(abc.ABC): def __init__( self, grid, x, y, name, description,", "self.x += offset_x self.y += offset_y self._grid.poke(self.x, self.y, self.glyph) if self._grid.is_inside(self.x, self.y): self._grid.lock(self.x,", "OUTPUT_PORT_NAME: OutputPort(x, y + 1), 
} ) def operation(self, frame, force=False): key =", "\"add\", \"Output sum of inputs\", glyph=\"a\", is_passive=is_passive ) self.ports.update( { \"a\": InputPort(x -", "\"Bangs on module of frame\", glyph=\"d\", is_passive=is_passive, ) self.ports.update( { \"rate\": InputPort(x -", "x, y, name, description, *, glyph=DOT_GLYPH, is_passive=False ): self.x = x self.y =", "return self._grid.listen(self.ports[\"val\"]) class Bang(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid,", "name self.description = description self.ports = {} self._grid = grid self.is_passive = is_passive", "class If(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y,", "grid, x, y, \"random\", \"Outputs random value\", glyph=\"r\", is_passive=is_passive, ) self.ports.update( { \"min\":", "\"random\", \"Outputs random value\", glyph=\"r\", is_passive=is_passive, ) self.ports.update( { \"min\": InputPort(x - 1,", "self.move(0, -1) self.is_passive = False class Random(IOperator): def __init__(self, grid, x, y, *,", "x: min(max(0, x), 8) ), \"note\": InputPort(self.x + 3, self.y), \"velocity\": InputPort( self.x", "Clock(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"clock\",", "at construction time.\"\"\" def __str__(self): return self.name def run(self, frame, force=False): payload =", "is None: logger.warn(\"Trying to bang, but no output port.\") return else: glyph =", "InputPort(x + 1, y, default=\"8\"), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_bang=True), } )", "is_sensitive=True), } ) def operation(self, frame, force=False): rate = self._grid.listen_as_value(self.ports[\"rate\"]) mod = self._grid.listen_as_value(self.ports[\"mod\"])", "OUTPUT_PORT_NAME in self.ports def _should_upper_case(self): output_port = self._output_port if output_port is None or", "((-1, 0), (1, 0), (0, -1), (0, 1)): if self._grid.peek(self.x + x, self.y", "== 0 or mod == 1 class 
East(IOperator): def __init__(self, grid, x, y,", "self._grid.lock(self._output_port.x, self._output_port.y) return self._grid.listen(self.ports[\"val\"]) class Bang(IOperator): def __init__(self, grid, x, y, *, is_passive=False):", "value == 0 or mod == 1 class East(IOperator): def __init__(self, grid, x,", "0) self.is_passive = False class Generator(IOperator): def __init__(self, grid, x, y, *, is_passive=False):", "high) return self._grid.key_of(value) class South(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__(", "== glyph: return True return False def move(self, offset_x, offset_y): new_x = self.x", "Halt(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"half\",", "self._grid.listen(self.ports[\"val\"]) class Multiply(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x,", "y + 1, is_sensitive=True), } ) def operation(self, frame, force=False): low = self._grid.listen_as_value(self.ports[\"min\"])", "InputPort(self.x + offset + 1, self.y) output_port = OutputPort(self.x + x + offset,", "self._grid.is_inside(new_x, new_y): self.explode() return collider = self._grid.peek(new_x, new_y) if collider not in (BANG_GLYPH,", "self._grid.lock(x, self.y) if self._grid.peek(x, self.y) == self.glyph: break _NOTES_VALUES = (\"C\", \"c\", \"D\",", "super().__init__( grid, x, y, \"if\", \"Bang if inputs are equal\", glyph=\"f\", is_passive=is_passive, )", ") self.do_draw = False def operation(self, frame, force=False): self.move(0, 1) self.is_passive = False", ") def operation(self, frame, force=False): key = self._grid.listen_as_value(self.ports[\"key\"]) length = self._grid.listen_as_value(self.ports[\"len\"]) for offset", "min(max(0, x), 8) ), \"note\": InputPort(self.x + 3, self.y), \"velocity\": InputPort( self.x +", "bang\", glyph=\"n\", is_passive=is_passive, ) self.do_draw = False def operation(self, frame, force=False): self.move(0, -1)", "self.x, 
self.y ) elif glyph is None: return else: if self._should_upper_case(): value =", "frame, force=False): self._grid.lock(self._output_port.x, self._output_port.y) return self._grid.listen(self.ports[\"val\"]) class Multiply(IOperator): def __init__(self, grid, x, y,", "frame, force=False): self._grid.lock(self._output_port.x, self._output_port.y) return self._grid.listen(self.ports[\"val\"]) class Bang(IOperator): def __init__(self, grid, x, y,", "\"key\": InputPort(x - 2, y), \"len\": InputPort(x - 1, y, clamp=lambda x: max(1,", "= self._output_port if output_port: if output_port.is_bang: self._bang(payload) else: self._output(payload) def erase(self): self._grid.poke(self.x, self.y,", "offset_y): new_x = self.x + offset_x new_y = self.y + offset_y if not", "x, y, *, is_passive=False): super().__init__( grid, x, y, \"south\", \"Move southward or bang\",", "y, *, is_passive=False): super().__init__( grid, x, y, \"y\", \"Outputs westward operator\", glyph=\"y\", is_passive=is_passive,", "\"B\") NOTE_TO_INDEX = {k: i for i, k in enumerate(_NOTES_VALUES)} class Midi(IOperator): def", "if channel > 15: return octave = self._grid.listen_as_value(self.ports[\"octave\"]) velocity = self._grid.listen_as_value(self.ports[\"velocity\"]) length =", "is_passive=is_passive, ) self.ports.update( { \"rate\": InputPort(x - 1, y, clamp=lambda x: max(1, x)),", "self.do_draw = is_passive self.glyph = glyph.upper() if is_passive else glyph @abc.abstractmethod def operation(self,", "False def operation(self, frame, force=False): self._grid.lock(self.x, self.y) for x in range(self.x + 1,", "): self.x = x self.y = y self.name = name self.description = description", "= self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(abs(b - a)) class Clock(IOperator): def __init__(self, grid, x, y,", "operation(self, frame, force=False): self._grid.lock(self.x, self.y + 1) # self._output_port.x, self._output_port.y) class If(IOperator): def", "self._grid.listen(self.ports[port_name]) 
== DOT_GLYPH: return note = self._grid.listen(self.ports[\"note\"]) if not NOTE_TO_INDEX: return channel =", "math.floor(frame / rate) % mod return self._grid.key_of(value) class Delay(IOperator): def __init__(self, grid, x,", "y + 1, is_sensitive=True), } ) def operation(self, frame, force=False): step = self._grid.listen_as_value(self.ports[\"step\"])", "y), } ) def operation(self, frame, force=False): self._grid.lock(self._output_port.x, self._output_port.y) return self._grid.listen(self.ports[\"val\"]) class Bang(IOperator):", "import abc import logging import math import random from orca.grid import BANG_GLYPH, COMMENT_GLYPH,", "given frame and return the payload. This may modify the grid. Note: the", "output_port.y, value) class Add(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid,", "payload): output_port = self._output_port if output_port is None: logger.warn(\"Trying to bang, but no", "y), \"b\": InputPort(x + 1, y), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_bang=True), }", "= self._grid.listen_as_value(self.ports[OUTPUT_PORT_NAME]) return self._grid.key_of((out + step) % (mod if mod > 0 else", "(mod * rate) return value == 0 or mod == 1 class East(IOperator):", "Jymper(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"y\",", "y, default=\"8\"), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_bang=True), } ) def operation(self, frame,", "self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(a * b) class North(IOperator): def __init__(self, grid, x, y, *,", "self.x + 2, self.y, clamp=lambda x: min(max(0, x), 8) ), \"note\": InputPort(self.x +", "self._grid.lock(self.x + offset + 1, self.y) port = InputPort(self.x + 1 + key", "length = self._grid.listen_as_value(self.ports[\"len\"]) x = self._grid.listen_as_value(self.ports[\"x\"]) y = self._grid.listen_as_value(self.ports[\"y\"]) + 1 for offset", "(mod if mod > 0 else 36)) class Jumper(IOperator): def 
__init__(self, grid, x,", "InputPort(self.x + 1, self.y), \"octave\": InputPort( self.x + 2, self.y, clamp=lambda x: min(max(0,", "InputPort(x + 1, y, default=\"8\"), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True), } )", "*, is_passive=False): super().__init__( grid, x, y, \"multiply\", \"Output multiplication of inputs\", glyph=\"m\", is_passive=is_passive,", "*, is_passive=False): super().__init__( grid, x, y, \"midi\", \"Send MIDI note\", glyph=\":\", is_passive=True, )", "self._output_port.y) class If(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x,", "offset in range(length): self._grid.lock(self.x + offset + 1, self.y) port = InputPort(self.x +", "def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"half\", \"Halts", "grid, x, y, \"east\", \"Move eastwards or bang\", glyph=\"e\", is_passive=is_passive, ) self.do_draw =", "self.x + offset_x new_y = self.y + offset_y if not self._grid.is_inside(new_x, new_y): self.explode()", "high = self._grid.listen_as_value(self.ports[\"max\"]) value = random.randint(low, high) return self._grid.key_of(value) class South(IOperator): def __init__(self,", "InputPort, OutputPort logger = logging.getLogger(__name__) OUTPUT_PORT_NAME = \"output\" class IOperator(abc.ABC): def __init__( self,", "= is_passive self.glyph = glyph.upper() if is_passive else glyph @abc.abstractmethod def operation(self, frame,", "1, self._grid.cols): self._grid.lock(x, self.y) if self._grid.peek(x, self.y) == self.glyph: break _NOTES_VALUES = (\"C\",", "32) ), } ) def operation(self, frame, force=False): if not self.has_neighbor(BANG_GLYPH) and not", "operator\", glyph=\"f\", is_passive=is_passive, ) self.ports.update( { \"val\": InputPort(x, y - 1), OUTPUT_PORT_NAME: OutputPort(x,", "{ \"min\": InputPort(x - 1, y), \"max\": InputPort(x + 1, y), OUTPUT_PORT_NAME: OutputPort(x,", "x, y, \"add\", \"Output sum of inputs\", glyph=\"a\", is_passive=is_passive ) 
self.ports.update( { \"a\":", "\"Outputs random value\", glyph=\"r\", is_passive=is_passive, ) self.ports.update( { \"min\": InputPort(x - 1, y),", ") def operation(self, frame, force=False): self._grid.lock(self._output_port.x, self._output_port.y) return self._grid.listen(self.ports[\"val\"]) class Bang(IOperator): def __init__(self,", "output_port = self._output_port if output_port is None: logger.warn(\"Trying to bang, but no output", "is_passive=False): super().__init__( grid, x, y, \"south\", \"Move southward or bang\", glyph=\"s\", is_passive=is_passive, )", "grid self.is_passive = is_passive self.do_draw = is_passive self.glyph = glyph.upper() if is_passive else", "is_passive=is_passive, ) self.ports.update( { \"a\": InputPort(x - 1, y), \"b\": InputPort(x + 1,", "def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"increment\", \"Increment", "= self._grid.listen_as_value( self.ports[\"a\"] ) + self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(index) class Substract(IOperator): def __init__(self, grid,", "False class Generator(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x,", "operation(self, frame, force=False): low = self._grid.listen_as_value(self.ports[\"min\"]) high = self._grid.listen_as_value(self.ports[\"max\"]) value = random.randint(low, high)", "grid, x, y, \"comment\", \"Halts line\", glyph=COMMENT_GLYPH, is_passive=is_passive, ) self.do_draw = False def", "in (BANG_GLYPH, DOT_GLYPH): self.explode() return self.erase() self.x += offset_x self.y += offset_y self._grid.poke(self.x,", "super().__init__( grid, x, y, \"j\", \"Outputs northward operator\", glyph=\"f\", is_passive=is_passive, ) self.ports.update( {", "glyph=\"f\", is_passive=is_passive, ) self.ports.update( { \"a\": InputPort(x - 1, y), \"b\": InputPort(x +", "+ y) self.ports.update( { f\"input{offset}\": input_port, f\"output{offset}\": output_port, } ) res = 
self._grid.listen(input_port)", "1, self.y) output_port = OutputPort(self.x + x + offset, self.y + y) self.ports.update(", "OUTPUT_PORT_NAME: OutputPort(x, y + 1), } ) def operation(self, frame, force=False): self._grid.lock(self._output_port.x, self._output_port.y)", "y, *, is_passive=False): super().__init__( grid, x, y, \"random\", \"Outputs random value\", glyph=\"r\", is_passive=is_passive,", "glyph=\"h\", is_passive=is_passive, ) def operation(self, frame, force=False): self._grid.lock(self.x, self.y + 1) # self._output_port.x,", "enumerate(_NOTES_VALUES)} class Midi(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x,", "self.y, port.x, port.y, ) self._grid.lock(port.x, port.y) output_port = self._output_port if output_port: if output_port.is_bang:", "is None: return else: if self._should_upper_case(): value = glyph.upper() else: value = glyph", "y), \"len\": InputPort(x - 1, y, clamp=lambda x: max(1, x)), OUTPUT_PORT_NAME: OutputPort(x, y", "self.ports.values(): if isinstance(port, OutputPort) and port.is_bang: continue logger.debug( \"Ops %s (%d, %d): locking", "OutputPort(self.x + x + offset, self.y + y) self.ports.update( { f\"input{offset}\": input_port, f\"output{offset}\":", "- a)) class Clock(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid,", "clamp=lambda x: max(x, 1)), } ) def operation(self, frame, force=False): length = self._grid.listen_as_value(self.ports[\"len\"])", "is None: logging.warn( \"No output port for operator %s @ (%d, %d)\", self.name,", "\"note\": InputPort(self.x + 3, self.y), \"velocity\": InputPort( self.x + 4, self.y, default=\"f\", clamp=lambda", "= False def operation(self, frame, force=False): self._grid.lock(self.x, self.y) for x in range(self.x +", "output port.\") return else: glyph = BANG_GLYPH if payload else DOT_GLYPH self._grid.poke(output_port.x, output_port.y,", "output_port.is_sensitive: return False else: right_port = InputPort(self.x + 
1, self.y) value = self._grid.listen(right_port)", "*, is_passive=False): super().__init__( grid, x, y, \"y\", \"Outputs westward operator\", glyph=\"y\", is_passive=is_passive, )", "+ 1, y, default=\"8\"), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True), } ) def", "1, self.y), \"octave\": InputPort( self.x + 2, self.y, clamp=lambda x: min(max(0, x), 8)", "), } ) def operation(self, frame, force=False): if not self.has_neighbor(BANG_GLYPH) and not force:", "glyph=DOT_GLYPH, is_passive=False ): self.x = x self.y = y self.name = name self.description", "class Midi(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y,", "y, \"east\", \"Move eastwards or bang\", glyph=\"e\", is_passive=is_passive, ) self.do_draw = False def", "self.y, BANG_GLYPH) def has_neighbor(self, glyph): for x, y in ((-1, 0), (1, 0),", "def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"generator\", \"Write", "IOperator(abc.ABC): def __init__( self, grid, x, y, name, description, *, glyph=DOT_GLYPH, is_passive=False ):", "frame, force=False): a = self._grid.listen_as_value(self.ports[\"a\"]) b = self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(a * b) class", "1, y), \"b\": InputPort(x + 1, y), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_bang=True),", "if is_passive else glyph @abc.abstractmethod def operation(self, frame, force=False): \"\"\"Run the operator for", "in self.ports def _should_upper_case(self): output_port = self._output_port if output_port is None or not", "self.is_passive = False class Random(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__(", "self.ports.update( { \"val\": InputPort(x, y - 1), OUTPUT_PORT_NAME: OutputPort(x, y + 1), }", "glyph=\"d\", is_passive=is_passive, ) self.ports.update( { \"rate\": InputPort(x - 1, y, clamp=lambda x: max(1,", "self.y + 1) # self._output_port.x, self._output_port.y) class If(IOperator): def __init__(self, 
grid, x, y,", "min(max(0, x), 32) ), } ) def operation(self, frame, force=False): if not self.has_neighbor(BANG_GLYPH)", "grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"j\", \"Outputs northward operator\",", "self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(abs(b - a)) class Clock(IOperator): def __init__(self, grid, x, y, *,", "is_passive=False): super().__init__( grid, x, y, \"comment\", \"Halts line\", glyph=COMMENT_GLYPH, is_passive=is_passive, ) self.do_draw =", "__init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"east\", \"Move eastwards", "is_passive=False): super().__init__( grid, x, y, \"clock\", \"Outputs modulo of frame\", glyph=\"c\", is_passive=is_passive, )", "InputPort(x - 2, y), \"len\": InputPort(x - 1, y, clamp=lambda x: max(x, 1)),", "glyph=\"m\", is_passive=is_passive, ) self.ports.update( { \"a\": InputPort(x - 1, y), \"b\": InputPort(x +", "x)), \"mod\": InputPort(x + 1, y, default=\"8\"), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True),", "y, *, is_passive=False): super().__init__( grid, x, y, \"north\", \"Move northward or bang\", glyph=\"n\",", "self.y) == self.glyph: break _NOTES_VALUES = (\"C\", \"c\", \"D\", \"d\", \"E\", \"F\", \"f\",", "note\", glyph=\":\", is_passive=True, ) self.ports.update( { \"channel\": InputPort(self.x + 1, self.y), \"octave\": InputPort(", "operation(self, frame, force=False): self.do_draw = False self.erase() class Comment(IOperator): def __init__(self, grid, x,", "collider not in (BANG_GLYPH, DOT_GLYPH): self.explode() return self.erase() self.x += offset_x self.y +=", "for operator %s @ (%d, %d)\", self.name, self.x, self.y ) elif glyph is", "__init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"substract\", \"Output difference", "return note = self._grid.listen(self.ports[\"note\"]) if not NOTE_TO_INDEX: return channel = self._grid.listen_as_value(self.ports[\"channel\"]) if channel", "= {} self._grid 
= grid self.is_passive = is_passive self.do_draw = is_passive self.glyph =", "super().__init__( grid, x, y, \"substract\", \"Output difference of inputs\", glyph=\"b\", is_passive=is_passive, ) self.ports.update(", "__init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"multiply\", \"Output multiplication", "is_passive=is_passive, ) def operation(self, frame, force=False): self._grid.lock(self.x, self.y + 1) # self._output_port.x, self._output_port.y)", "__init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"increment\", \"Increment operator", "1, y, clamp=lambda x: max(x, 1)), } ) def operation(self, frame, force=False): length", "= self._output_port else: output_port = port if output_port is None: logging.warn( \"No output", "3, self.y), \"velocity\": InputPort( self.x + 4, self.y, default=\"f\", clamp=lambda x: min(max(0, x),", "@abc.abstractmethod def operation(self, frame, force=False): \"\"\"Run the operator for the given frame and", "output_port, } ) res = self._grid.listen(input_port) self._output(res, output_port) class Halt(IOperator): def __init__(self, grid,", "+= offset_y self._grid.poke(self.x, self.y, self.glyph) if self._grid.is_inside(self.x, self.y): self._grid.lock(self.x, self.y) @property def _output_port(self):", "self.is_passive = False class Jymper(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__(", "class Delay(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y,", "Add(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"add\",", "y, \"midi\", \"Send MIDI note\", glyph=\":\", is_passive=True, ) self.ports.update( { \"channel\": InputPort(self.x +", "self.x + 5, self.y, clamp=lambda x: min(max(0, x), 32) ), } ) def", "a)) class Clock(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x,", "DOT_GLYPH) def explode(self): 
self._grid.poke(self.x, self.y, BANG_GLYPH) def has_neighbor(self, glyph): for x, y in", "+ offset + 1, self.y) output_port = OutputPort(self.x + x + offset, self.y", "Random(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"random\",", "= self._grid.listen_as_value(self.ports[\"y\"]) + 1 for offset in range(length): input_port = InputPort(self.x + offset", "eastwards or bang\", glyph=\"e\", is_passive=is_passive, ) self.do_draw = False def operation(self, frame, force=False):", "value.upper() != value: return False else: return True def _bang(self, payload): output_port =", "min(max(0, x), 16) ), \"length\": InputPort( self.x + 5, self.y, clamp=lambda x: min(max(0,", "is_passive else glyph @abc.abstractmethod def operation(self, frame, force=False): \"\"\"Run the operator for the", "super().__init__( grid, x, y, \"generator\", \"Write operands with offset\", glyph=\"g\", is_passive=is_passive, ) self.ports.update(", "random.randint(low, high) return self._grid.key_of(value) class South(IOperator): def __init__(self, grid, x, y, *, is_passive=False):", "new_x = self.x + offset_x new_y = self.y + offset_y if not self._grid.is_inside(new_x,", "self._grid.key_of((out + step) % (mod if mod > 0 else 36)) class Jumper(IOperator):", "super().__init__( grid, x, y, \"add\", \"Output sum of inputs\", glyph=\"a\", is_passive=is_passive ) self.ports.update(", "\"south\", \"Move southward or bang\", glyph=\"s\", is_passive=is_passive, ) self.do_draw = False def operation(self,", "operator\", glyph=\"y\", is_passive=is_passive, ) self.ports.update( { \"val\": InputPort(x - 1, y), OUTPUT_PORT_NAME: OutputPort(x", "\"Ops %s (%d, %d): locking port @ %d, %d\", self.name, self.x, self.y, port.x,", "} ) def operation(self, frame, force=False): self._grid.lock(self._output_port.x, self._output_port.y) return self._grid.listen(self.ports[\"val\"]) class Multiply(IOperator): def", "self._output(payload) def erase(self): 
self._grid.poke(self.x, self.y, DOT_GLYPH) def explode(self): self._grid.poke(self.x, self.y, BANG_GLYPH) def has_neighbor(self,", "rate = self._grid.listen_as_value(self.ports[\"rate\"]) mod = self._grid.listen_as_value(self.ports[\"mod\"]) value = math.floor(frame / rate) % mod", "y, *, is_passive=False): super().__init__( grid, x, y, \"half\", \"Halts southward operator\", glyph=\"h\", is_passive=is_passive,", "= self._grid.listen_as_value(self.ports[\"mod\"]) value = math.floor(frame / rate) % mod return self._grid.key_of(value) class Delay(IOperator):", "def _output_port(self): return self.ports.get(OUTPUT_PORT_NAME) def _has_output_port(self): return OUTPUT_PORT_NAME in self.ports def _should_upper_case(self): output_port", "self._grid = grid self.is_passive = is_passive self.do_draw = is_passive self.glyph = glyph.upper() if", "x: min(max(0, x), 16) ), \"length\": InputPort( self.x + 5, self.y, clamp=lambda x:", "This may modify the grid. Note: the frame is assumed to match the", "\"Increment operator southward\", glyph=\"i\", is_passive=is_passive, ) self.ports.update( { \"step\": InputPort(x - 1, y,", "eastward operand\", glyph=\"t\", is_passive=is_passive, ) self.ports.update( { \"key\": InputPort(x - 2, y), \"len\":", "= self._grid.listen_as_value(self.ports[\"octave\"]) velocity = self._grid.listen_as_value(self.ports[\"velocity\"]) length = self._grid.listen_as_value(self.ports[\"length\"]) self._grid.push_midi(MidiNoteOnEvent(channel, octave, note, velocity, length))", "InputPort(self.x + 3, self.y), \"velocity\": InputPort( self.x + 4, self.y, default=\"f\", clamp=lambda x:", "self.operation(frame, force) for port in self.ports.values(): if isinstance(port, OutputPort) and port.is_bang: continue logger.debug(", "match the state of the grid given at construction time.\"\"\" def __str__(self): return", "\"No output port for operator %s @ (%d, %d)\", self.name, self.x, self.y )", "force=False): self._grid.lock(self._output_port.x, self._output_port.y) 
return self._grid.listen(self.ports[\"val\"]) class Bang(IOperator): def __init__(self, grid, x, y, *,", "x, y, *, is_passive=False): super().__init__( grid, x, y, \"j\", \"Outputs northward operator\", glyph=\"f\",", "self._grid.lock(self.x, self.y) @property def _output_port(self): return self.ports.get(OUTPUT_PORT_NAME) def _has_output_port(self): return OUTPUT_PORT_NAME in self.ports", "return OUTPUT_PORT_NAME in self.ports def _should_upper_case(self): output_port = self._output_port if output_port is None", "= self._grid.listen_as_value(self.ports[\"mod\"]) out = self._grid.listen_as_value(self.ports[OUTPUT_PORT_NAME]) return self._grid.key_of((out + step) % (mod if mod", "is_sensitive=True), } ) def operation(self, frame, force=False): low = self._grid.listen_as_value(self.ports[\"min\"]) high = self._grid.listen_as_value(self.ports[\"max\"])", "payload. This may modify the grid. Note: the frame is assumed to match", "-1), (0, 1)): if self._grid.peek(self.x + x, self.y + y) == glyph: return", "= self._grid.listen_as_value(self.ports[\"a\"]) b = self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(a * b) class North(IOperator): def __init__(self,", "f\"input{offset}\": input_port, f\"output{offset}\": output_port, } ) res = self._grid.listen(input_port) self._output(res, output_port) class Halt(IOperator):", "construction time.\"\"\" def __str__(self): return self.name def run(self, frame, force=False): payload = self.operation(frame,", "x)), \"mod\": InputPort(x + 1, y, default=\"8\"), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_bang=True),", "x self.y = y self.name = name self.description = description self.ports = {}", "return self._grid.key_of(index) class Substract(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid,", "self.ports.update( { \"min\": InputPort(x - 1, y), \"max\": InputPort(x + 1, y), OUTPUT_PORT_NAME:", "if value.lower() == value.upper() or value.upper() != value: return False else: 
return True", "max(1, x)), \"mod\": InputPort(x + 1, y, default=\"8\"), OUTPUT_PORT_NAME: OutputPort(x, y + 1,", "of inputs\", glyph=\"b\", is_passive=is_passive, ) self.ports.update( { \"a\": InputPort(x - 1, y), \"b\":", "\"min\": InputPort(x - 1, y), \"max\": InputPort(x + 1, y), OUTPUT_PORT_NAME: OutputPort(x, y", ") def operation(self, frame, force=False): index = self._grid.listen_as_value( self.ports[\"a\"] ) + self._grid.listen_as_value(self.ports[\"b\"]) return", "self.y, default=\"f\", clamp=lambda x: min(max(0, x), 16) ), \"length\": InputPort( self.x + 5,", "\"Output multiplication of inputs\", glyph=\"m\", is_passive=is_passive, ) self.ports.update( { \"a\": InputPort(x - 1,", "self._grid.listen_as_value(self.ports[OUTPUT_PORT_NAME]) return self._grid.key_of((out + step) % (mod if mod > 0 else 36))", "= x self.y = y self.name = name self.description = description self.ports =", "== value.upper() or value.upper() != value: return False else: return True def _bang(self,", "self._grid.poke(self.x, self.y, DOT_GLYPH) def explode(self): self._grid.poke(self.x, self.y, BANG_GLYPH) def has_neighbor(self, glyph): for x,", "offset + 1, self.y) port = InputPort(self.x + 1 + key % length,", "is_passive=False): super().__init__( grid, x, y, \"delay\", \"Bangs on module of frame\", glyph=\"d\", is_passive=is_passive,", "\"substract\", \"Output difference of inputs\", glyph=\"b\", is_passive=is_passive, ) self.ports.update( { \"a\": InputPort(x -", "glyph is None: return else: if self._should_upper_case(): value = glyph.upper() else: value =", "self.y): self._grid.lock(self.x, self.y) @property def _output_port(self): return self.ports.get(OUTPUT_PORT_NAME) def _has_output_port(self): return OUTPUT_PORT_NAME in", "grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"half\", \"Halts southward operator\",", "x, y, *, is_passive=False): super().__init__( grid, x, y, \"generator\", \"Write operands with offset\",", "frame, force=False): self.do_draw = False 
self.erase() class Comment(IOperator): def __init__(self, grid, x, y,", "class Comment(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y,", "= self._grid.listen_as_value(self.ports[\"key\"]) length = self._grid.listen_as_value(self.ports[\"len\"]) for offset in range(length): self._grid.lock(self.x + offset +", "on module of frame\", glyph=\"d\", is_passive=is_passive, ) self.ports.update( { \"rate\": InputPort(x - 1,", "out = self._grid.listen_as_value(self.ports[OUTPUT_PORT_NAME]) return self._grid.key_of((out + step) % (mod if mod > 0", "y, \"north\", \"Move northward or bang\", glyph=\"n\", is_passive=is_passive, ) self.do_draw = False def", "self._grid.listen(self.ports[\"a\"]) b = self._grid.listen(self.ports[\"b\"]) return a == b class Increment(IOperator): def __init__(self, grid,", "b = self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(abs(b - a)) class Clock(IOperator): def __init__(self, grid, x,", "(%d, %d)\", self.name, self.x, self.y ) elif glyph is None: return else: if", "\"b\": InputPort(x + 1, y), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True), } )", "clamp=lambda x: max(1, x)), \"mod\": InputPort(x + 1, y, default=\"8\"), OUTPUT_PORT_NAME: OutputPort(x, y", "no output port.\") return else: glyph = BANG_GLYPH if payload else DOT_GLYPH self._grid.poke(output_port.x,", "= self._grid.listen_as_value(self.ports[\"x\"]) y = self._grid.listen_as_value(self.ports[\"y\"]) + 1 for offset in range(length): input_port =", "b = self._grid.listen(self.ports[\"b\"]) return a == b class Increment(IOperator): def __init__(self, grid, x,", "return else: if self._should_upper_case(): value = glyph.upper() else: value = glyph self._grid.poke(output_port.x, output_port.y,", "= {k: i for i, k in enumerate(_NOTES_VALUES)} class Midi(IOperator): def __init__(self, grid,", "is_passive=False ): self.x = x self.y = y self.name = name self.description =", "= math.floor(frame / rate) % mod return 
self._grid.key_of(value) class Delay(IOperator): def __init__(self, grid,", "= glyph.upper() if is_passive else glyph @abc.abstractmethod def operation(self, frame, force=False): \"\"\"Run the", "self._grid.peek(x, self.y) == self.glyph: break _NOTES_VALUES = (\"C\", \"c\", \"D\", \"d\", \"E\", \"F\",", "__init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"delay\", \"Bangs on", "def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"east\", \"Move", "def operation(self, frame, force=False): self._grid.lock(self._output_port.x, self._output_port.y) return self._grid.listen(self.ports[\"val\"]) class Bang(IOperator): def __init__(self, grid,", "and port.is_bang: continue logger.debug( \"Ops %s (%d, %d): locking port @ %d, %d\",", "x: max(1, x)), \"mod\": InputPort(x + 1, y, default=\"8\"), OUTPUT_PORT_NAME: OutputPort(x, y +", "class Random(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y,", "def erase(self): self._grid.poke(self.x, self.y, DOT_GLYPH) def explode(self): self._grid.poke(self.x, self.y, BANG_GLYPH) def has_neighbor(self, glyph):", "} ) def operation(self, frame, force=False): if not self.has_neighbor(BANG_GLYPH) and not force: return", "southward operator\", glyph=\"h\", is_passive=is_passive, ) def operation(self, frame, force=False): self._grid.lock(self.x, self.y + 1)", "+ y) == glyph: return True return False def move(self, offset_x, offset_y): new_x", "self._output_port.x, self._output_port.y) class If(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid,", ") def operation(self, frame, force=False): step = self._grid.listen_as_value(self.ports[\"step\"]) mod = self._grid.listen_as_value(self.ports[\"mod\"]) out =", "glyph): for x, y in ((-1, 0), (1, 0), (0, -1), (0, 1)):", "def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"midi\", \"Send", "{ f\"input{offset}\": input_port, 
f\"output{offset}\": output_port, } ) res = self._grid.listen(input_port) self._output(res, output_port) class", "* rate) return value == 0 or mod == 1 class East(IOperator): def", "@ %d, %d\", self.name, self.x, self.y, port.x, port.y, ) self._grid.lock(port.x, port.y) output_port =", "{ \"val\": InputPort(x, y - 1), OUTPUT_PORT_NAME: OutputPort(x, y + 1), } )", ") self.do_draw = False def operation(self, frame, force=False): self.move(1, 0) self.is_passive = False", "\"b\": InputPort(x + 1, y), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_bang=True), } )", "equal\", glyph=\"f\", is_passive=is_passive, ) self.ports.update( { \"a\": InputPort(x - 1, y), \"b\": InputPort(x", "OutputPort(x, y + 1, is_sensitive=True), } ) def operation(self, frame, force=False): index =", "y, \"delay\", \"Bangs on module of frame\", glyph=\"d\", is_passive=is_passive, ) self.ports.update( { \"rate\":", "port.\") return else: glyph = BANG_GLYPH if payload else DOT_GLYPH self._grid.poke(output_port.x, output_port.y, glyph)", "is_passive=False): super().__init__( grid, x, y, \"j\", \"Outputs northward operator\", glyph=\"f\", is_passive=is_passive, ) self.ports.update(", "self.x = x self.y = y self.name = name self.description = description self.ports", "frame, force=False): self.move(-1, 0) self.is_passive = False class Jymper(IOperator): def __init__(self, grid, x,", "value) class Add(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x,", "self._grid.listen_as_value(self.ports[\"a\"]) b = self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(a * b) class North(IOperator): def __init__(self, grid,", "in range(self.x + 1, self._grid.cols): self._grid.lock(x, self.y) if self._grid.peek(x, self.y) == self.glyph: break", "self.ports.update( { f\"input{offset}\": input_port, f\"output{offset}\": output_port, } ) res = self._grid.listen(input_port) self._output(res, output_port)", "== DOT_GLYPH: return note = 
self._grid.listen(self.ports[\"note\"]) if not NOTE_TO_INDEX: return channel = self._grid.listen_as_value(self.ports[\"channel\"])", "Note: the frame is assumed to match the state of the grid given", "= \"output\" class IOperator(abc.ABC): def __init__( self, grid, x, y, name, description, *,", "k in enumerate(_NOTES_VALUES)} class Midi(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__(", "+ 5, self.y, clamp=lambda x: min(max(0, x), 32) ), } ) def operation(self,", "x, y, *, is_passive=False): super().__init__( grid, x, y, \"half\", \"Halts southward operator\", glyph=\"h\",", "logging import math import random from orca.grid import BANG_GLYPH, COMMENT_GLYPH, DOT_GLYPH, MidiNoteOnEvent from", "self._grid.lock(port.x, port.y) output_port = self._output_port if output_port: if output_port.is_bang: self._bang(payload) else: self._output(payload) def", "x, y, *, is_passive=False): super().__init__( grid, x, y, \"comment\", \"Halts line\", glyph=COMMENT_GLYPH, is_passive=is_passive,", "self.ports def _should_upper_case(self): output_port = self._output_port if output_port is None or not output_port.is_sensitive:", "If(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"if\",", "is_passive=False): super().__init__( grid, x, y, \"if\", \"Bang if inputs are equal\", glyph=\"f\", is_passive=is_passive,", "x, y, \"y\", \"Outputs westward operator\", glyph=\"y\", is_passive=is_passive, ) self.ports.update( { \"val\": InputPort(x", "= is_passive self.do_draw = is_passive self.glyph = glyph.upper() if is_passive else glyph @abc.abstractmethod", "x, y, \"half\", \"Halts southward operator\", glyph=\"h\", is_passive=is_passive, ) def operation(self, frame, force=False):", "= name self.description = description self.ports = {} self._grid = grid self.is_passive =", "x in range(self.x + 1, self._grid.cols): self._grid.lock(x, self.y) if self._grid.peek(x, self.y) == self.glyph:", "the state of the grid 
given at construction time.\"\"\" def __str__(self): return self.name", "or not output_port.is_sensitive: return False else: right_port = InputPort(self.x + 1, self.y) value", "East(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"east\",", "force=False): self.move(0, -1) self.is_passive = False class Random(IOperator): def __init__(self, grid, x, y,", "} ) def operation(self, frame, force=False): low = self._grid.listen_as_value(self.ports[\"min\"]) high = self._grid.listen_as_value(self.ports[\"max\"]) value", "grid, x, y, \"bang\", \"Bangs neighboring operands\", glyph=BANG_GLYPH, is_passive=is_passive, ) self.do_draw = False", "\"clock\", \"Outputs modulo of frame\", glyph=\"c\", is_passive=is_passive, ) self.ports.update( { \"rate\": InputPort(x -", "NOTE_TO_INDEX: return channel = self._grid.listen_as_value(self.ports[\"channel\"]) if channel > 15: return octave = self._grid.listen_as_value(self.ports[\"octave\"])", "break _NOTES_VALUES = (\"C\", \"c\", \"D\", \"d\", \"E\", \"F\", \"f\", \"G\", \"g\", \"A\",", "class Bang(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y,", "*, is_passive=False): super().__init__( grid, x, y, \"comment\", \"Halts line\", glyph=COMMENT_GLYPH, is_passive=is_passive, ) self.do_draw", "super().__init__( grid, x, y, \"north\", \"Move northward or bang\", glyph=\"n\", is_passive=is_passive, ) self.do_draw", "= self._grid.listen_as_value(self.ports[\"len\"]) for offset in range(length): self._grid.lock(self.x + offset + 1, self.y) port", "grid, x, y, \"substract\", \"Output difference of inputs\", glyph=\"b\", is_passive=is_passive, ) self.ports.update( {", "return self._grid.key_of((out + step) % (mod if mod > 0 else 36)) class", "*, is_passive=False): super().__init__( grid, x, y, \"random\", \"Outputs random value\", glyph=\"r\", is_passive=is_passive, )", "= False class Random(IOperator): def __init__(self, grid, x, y, *, 
is_passive=False): super().__init__( grid,", "} ) def operation(self, frame, force=False): self._grid.lock(self._output_port.x, self._output_port.y) return self._grid.listen(self.ports[\"val\"]) class Bang(IOperator): def", "y, \"increment\", \"Increment operator southward\", glyph=\"i\", is_passive=is_passive, ) self.ports.update( { \"step\": InputPort(x -", "\"output\" class IOperator(abc.ABC): def __init__( self, grid, x, y, name, description, *, glyph=DOT_GLYPH,", "y + 1, is_sensitive=True), } ) def operation(self, frame, force=False): a = self._grid.listen_as_value(self.ports[\"a\"])", "x, y, \"west\", \"Move westward or bang\", glyph=\"w\", is_passive=is_passive, ) self.do_draw = False", "OutputPort(x, y + 1, is_sensitive=True), } ) def operation(self, frame, force=False): low =", ") def operation(self, frame, force=False): length = self._grid.listen_as_value(self.ports[\"len\"]) x = self._grid.listen_as_value(self.ports[\"x\"]) y =", "1) # self._output_port.x, self._output_port.y) class If(IOperator): def __init__(self, grid, x, y, *, is_passive=False):", "InputPort(x + 1, y), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True), } ) def", "\"half\", \"Halts southward operator\", glyph=\"h\", is_passive=is_passive, ) def operation(self, frame, force=False): self._grid.lock(self.x, self.y", "port @ %d, %d\", self.name, self.x, self.y, port.x, port.y, ) self._grid.lock(port.x, port.y) output_port", "not in (BANG_GLYPH, DOT_GLYPH): self.explode() return self.erase() self.x += offset_x self.y += offset_y", "grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"if\", \"Bang if inputs", "for i, k in enumerate(_NOTES_VALUES)} class Midi(IOperator): def __init__(self, grid, x, y, *,", "self.y, clamp=lambda x: min(max(0, x), 32) ), } ) def operation(self, frame, force=False):", "f\"output{offset}\": output_port, } ) res = self._grid.listen(input_port) self._output(res, output_port) class Halt(IOperator): def __init__(self,", "\"len\": InputPort(x - 
1, y, clamp=lambda x: max(1, x)), OUTPUT_PORT_NAME: OutputPort(x, y +", "def move(self, offset_x, offset_y): new_x = self.x + offset_x new_y = self.y +", "def __str__(self): return self.name def run(self, frame, force=False): payload = self.operation(frame, force) for", "return self._grid.key_of(value) class South(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid,", "return self.ports.get(OUTPUT_PORT_NAME) def _has_output_port(self): return OUTPUT_PORT_NAME in self.ports def _should_upper_case(self): output_port = self._output_port", "\"y\", \"Outputs westward operator\", glyph=\"y\", is_passive=is_passive, ) self.ports.update( { \"val\": InputPort(x - 1,", "output_port: if output_port.is_bang: self._bang(payload) else: self._output(payload) def erase(self): self._grid.poke(self.x, self.y, DOT_GLYPH) def explode(self):", "y, \"y\", \"Outputs westward operator\", glyph=\"y\", is_passive=is_passive, ) self.ports.update( { \"val\": InputPort(x -", "class Add(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y,", "right_port = InputPort(self.x + 1, self.y) value = self._grid.listen(right_port) if value.lower() == value.upper()", "operation(self, frame, force=False): self._grid.lock(self._output_port.x, self._output_port.y) return self._grid.listen(self.ports[\"val\"]) class Multiply(IOperator): def __init__(self, grid, x,", "def operation(self, frame, force=False): length = self._grid.listen_as_value(self.ports[\"len\"]) x = self._grid.listen_as_value(self.ports[\"x\"]) y = self._grid.listen_as_value(self.ports[\"y\"])", "is_passive=is_passive, ) self.ports.update( { \"key\": InputPort(x - 2, y), \"len\": InputPort(x - 1,", "isinstance(port, OutputPort) and port.is_bang: continue logger.debug( \"Ops %s (%d, %d): locking port @", "new_y = self.y + offset_y if not self._grid.is_inside(new_x, new_y): self.explode() return collider =", "- 1, y, clamp=lambda x: max(x, 1)), } ) def 
operation(self, frame, force=False):", "\"Outputs westward operator\", glyph=\"y\", is_passive=is_passive, ) self.ports.update( { \"val\": InputPort(x - 1, y),", "1), } ) def operation(self, frame, force=False): self._grid.lock(self._output_port.x, self._output_port.y) return self._grid.listen(self.ports[\"val\"]) class Multiply(IOperator):", "= False self.erase() class Comment(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__(", "+ 1), } ) def operation(self, frame, force=False): key = self._grid.listen_as_value(self.ports[\"key\"]) length =", "x: min(max(0, x), 32) ), } ) def operation(self, frame, force=False): if not", "self._grid.listen_as_value(self.ports[\"len\"]) x = self._grid.listen_as_value(self.ports[\"x\"]) y = self._grid.listen_as_value(self.ports[\"y\"]) + 1 for offset in range(length):", "+= offset_x self.y += offset_y self._grid.poke(self.x, self.y, self.glyph) if self._grid.is_inside(self.x, self.y): self._grid.lock(self.x, self.y)", "self._grid.listen_as_value(self.ports[\"min\"]) high = self._grid.listen_as_value(self.ports[\"max\"]) value = random.randint(low, high) return self._grid.key_of(value) class South(IOperator): def", "is assumed to match the state of the grid given at construction time.\"\"\"", "self.ports[\"a\"] ) + self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(index) class Substract(IOperator): def __init__(self, grid, x, y,", "if output_port is None: logging.warn( \"No output port for operator %s @ (%d,", "may modify the grid. 
Note: the frame is assumed to match the state", "0), (1, 0), (0, -1), (0, 1)): if self._grid.peek(self.x + x, self.y +", "x, y, \"track\", \"Reads eastward operand\", glyph=\"t\", is_passive=is_passive, ) self.ports.update( { \"key\": InputPort(x", "def _has_output_port(self): return OUTPUT_PORT_NAME in self.ports def _should_upper_case(self): output_port = self._output_port if output_port", "def operation(self, frame, force=False): low = self._grid.listen_as_value(self.ports[\"min\"]) high = self._grid.listen_as_value(self.ports[\"max\"]) value = random.randint(low,", "2, y), \"len\": InputPort(x - 1, y, clamp=lambda x: max(1, x)), OUTPUT_PORT_NAME: OutputPort(x,", "operation(self, frame, force=False): self.move(0, 1) self.is_passive = False class Track(IOperator): def __init__(self, grid,", "x, y, \"north\", \"Move northward or bang\", glyph=\"n\", is_passive=is_passive, ) self.do_draw = False", "1, is_sensitive=True), } ) def operation(self, frame, force=False): a = self._grid.listen_as_value(self.ports[\"a\"]) b =", "operation(self, frame, force=False): self.move(1, 0) self.is_passive = False class Generator(IOperator): def __init__(self, grid,", "not NOTE_TO_INDEX: return channel = self._grid.listen_as_value(self.ports[\"channel\"]) if channel > 15: return octave =", "DOT_GLYPH, MidiNoteOnEvent from orca.ports import InputPort, OutputPort logger = logging.getLogger(__name__) OUTPUT_PORT_NAME = \"output\"", "def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"j\", \"Outputs", "offset_x new_y = self.y + offset_y if not self._grid.is_inside(new_x, new_y): self.explode() return collider", "\"val\": InputPort(x, y - 1), OUTPUT_PORT_NAME: OutputPort(x, y + 1), } ) def", "y, *, is_passive=False): super().__init__( grid, x, y, \"increment\", \"Increment operator southward\", glyph=\"i\", is_passive=is_passive,", "bang, but no output port.\") return else: glyph = BANG_GLYPH if payload else", "operation(self, frame, force=False): rate = 
self._grid.listen_as_value(self.ports[\"rate\"]) mod = self._grid.listen_as_value(self.ports[\"mod\"]) value = math.floor(frame /", "\"a\", \"B\") NOTE_TO_INDEX = {k: i for i, k in enumerate(_NOTES_VALUES)} class Midi(IOperator):", "\"Outputs northward operator\", glyph=\"f\", is_passive=is_passive, ) self.ports.update( { \"val\": InputPort(x, y - 1),", "glyph=\"f\", is_passive=is_passive, ) self.ports.update( { \"val\": InputPort(x, y - 1), OUTPUT_PORT_NAME: OutputPort(x, y", "random from orca.grid import BANG_GLYPH, COMMENT_GLYPH, DOT_GLYPH, MidiNoteOnEvent from orca.ports import InputPort, OutputPort", "but no output port.\") return else: glyph = BANG_GLYPH if payload else DOT_GLYPH", "self._output_port.y) return self._grid.listen(self.ports[\"val\"]) class Bang(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__(", "self.x + 4, self.y, default=\"f\", clamp=lambda x: min(max(0, x), 16) ), \"length\": InputPort(", "else: if self._should_upper_case(): value = glyph.upper() else: value = glyph self._grid.poke(output_port.x, output_port.y, value)", "input_port = InputPort(self.x + offset + 1, self.y) output_port = OutputPort(self.x + x", "y, *, is_passive=False): super().__init__( grid, x, y, \"track\", \"Reads eastward operand\", glyph=\"t\", is_passive=is_passive,", "else: self._output(payload) def erase(self): self._grid.poke(self.x, self.y, DOT_GLYPH) def explode(self): self._grid.poke(self.x, self.y, BANG_GLYPH) def", "self.explode() return collider = self._grid.peek(new_x, new_y) if collider not in (BANG_GLYPH, DOT_GLYPH): self.explode()", "return self._grid.key_of(a * b) class North(IOperator): def __init__(self, grid, x, y, *, is_passive=False):", "locking port @ %d, %d\", self.name, self.x, self.y, port.x, port.y, ) self._grid.lock(port.x, port.y)", "*, is_passive=False): super().__init__( grid, x, y, \"if\", \"Bang if inputs are equal\", glyph=\"f\",", "the grid. 
Note: the frame is assumed to match the state of the", "- 1, y, default=\"1\"), \"mod\": InputPort(x + 1, y), OUTPUT_PORT_NAME: OutputPort(x, y +", ") self.ports.update( { \"val\": InputPort(x, y - 1), OUTPUT_PORT_NAME: OutputPort(x, y + 1),", "+ 1 for offset in range(length): input_port = InputPort(self.x + offset + 1,", "port is None: output_port = self._output_port else: output_port = port if output_port is", "max(1, x)), OUTPUT_PORT_NAME: OutputPort(x, y + 1), } ) def operation(self, frame, force=False):", "class Track(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y,", "= description self.ports = {} self._grid = grid self.is_passive = is_passive self.do_draw =", "__init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"y\", \"Outputs westward", "= self._grid.listen(right_port) if value.lower() == value.upper() or value.upper() != value: return False else:", ") self.ports.update( { \"x\": InputPort(x - 3, y), \"y\": InputPort(x - 2, y),", "def operation(self, frame, force=False): if not self.has_neighbor(BANG_GLYPH) and not force: return for port_name", "force=False): self._grid.lock(self.x, self.y) for x in range(self.x + 1, self._grid.cols): self._grid.lock(x, self.y) if", "self._grid.listen_as_value(self.ports[\"mod\"]) value = frame % (mod * rate) return value == 0 or", "offset\", glyph=\"g\", is_passive=is_passive, ) self.ports.update( { \"x\": InputPort(x - 3, y), \"y\": InputPort(x", "= self.operation(frame, force) for port in self.ports.values(): if isinstance(port, OutputPort) and port.is_bang: continue", "the operator for the given frame and return the payload. 
This may modify", "InputPort(x - 1, y, clamp=lambda x: max(1, x)), OUTPUT_PORT_NAME: OutputPort(x, y + 1),", "+ 3, self.y), \"velocity\": InputPort( self.x + 4, self.y, default=\"f\", clamp=lambda x: min(max(0,", "2, y), \"len\": InputPort(x - 1, y, clamp=lambda x: max(x, 1)), } )", "_NOTES_VALUES = (\"C\", \"c\", \"D\", \"d\", \"E\", \"F\", \"f\", \"G\", \"g\", \"A\", \"a\",", ") self.do_draw = False def operation(self, frame, force=False): self._grid.lock(self.x, self.y) for x in", "key = self._grid.listen_as_value(self.ports[\"key\"]) length = self._grid.listen_as_value(self.ports[\"len\"]) for offset in range(length): self._grid.lock(self.x + offset", "inputs\", glyph=\"m\", is_passive=is_passive, ) self.ports.update( { \"a\": InputPort(x - 1, y), \"b\": InputPort(x", "*, is_passive=False): super().__init__( grid, x, y, \"generator\", \"Write operands with offset\", glyph=\"g\", is_passive=is_passive,", "\"Send MIDI note\", glyph=\":\", is_passive=True, ) self.ports.update( { \"channel\": InputPort(self.x + 1, self.y),", "y, \"if\", \"Bang if inputs are equal\", glyph=\"f\", is_passive=is_passive, ) self.ports.update( { \"a\":", "is_passive=False): super().__init__( grid, x, y, \"add\", \"Output sum of inputs\", glyph=\"a\", is_passive=is_passive )", "is_passive=is_passive, ) self.ports.update( { \"x\": InputPort(x - 3, y), \"y\": InputPort(x - 2,", "continue logger.debug( \"Ops %s (%d, %d): locking port @ %d, %d\", self.name, self.x,", ") self.do_draw = False def operation(self, frame, force=False): self.move(-1, 0) self.is_passive = False", "frame, force=False): self._grid.lock(self.x, self.y) for x in range(self.x + 1, self._grid.cols): self._grid.lock(x, self.y)", "%d)\", self.name, self.x, self.y ) elif glyph is None: return else: if self._should_upper_case():", "\"west\", \"Move westward or bang\", glyph=\"w\", is_passive=is_passive, ) self.do_draw = False def operation(self,", "grid, x, y, \"if\", \"Bang if inputs are equal\", glyph=\"f\", 
is_passive=is_passive, ) self.ports.update(", "glyph=\"n\", is_passive=is_passive, ) self.do_draw = False def operation(self, frame, force=False): self.move(0, -1) self.is_passive", "return self._grid.key_of(value) class Delay(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid,", "self._grid.listen_as_value(self.ports[\"channel\"]) if channel > 15: return octave = self._grid.listen_as_value(self.ports[\"octave\"]) velocity = self._grid.listen_as_value(self.ports[\"velocity\"]) length", "self._grid.listen(self.ports[\"b\"]) return a == b class Increment(IOperator): def __init__(self, grid, x, y, *,", "glyph=\"b\", is_passive=is_passive, ) self.ports.update( { \"a\": InputPort(x - 1, y), \"b\": InputPort(x +", "InputPort(x - 1, y, default=\"1\"), \"mod\": InputPort(x + 1, y), OUTPUT_PORT_NAME: OutputPort(x, y", "InputPort(self.x + 1 + key % length, self.y) return self._grid.listen(port) class West(IOperator): def", "\"a\": InputPort(x - 1, y), \"b\": InputPort(x + 1, y), OUTPUT_PORT_NAME: OutputPort(x, y", "1, y, default=\"8\"), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_bang=True), } ) def operation(self,", "def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"bang\", \"Bangs", "payload = self.operation(frame, force) for port in self.ports.values(): if isinstance(port, OutputPort) and port.is_bang:", "{ \"rate\": InputPort(x - 1, y, clamp=lambda x: max(1, x)), \"mod\": InputPort(x +", "if mod > 0 else 36)) class Jumper(IOperator): def __init__(self, grid, x, y,", "frame is assumed to match the state of the grid given at construction", "= False class Generator(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid,", "bang\", glyph=\"w\", is_passive=is_passive, ) self.do_draw = False def operation(self, frame, force=False): self.move(-1, 0)", "} ) res = self._grid.listen(input_port) self._output(res, output_port) class Halt(IOperator): def __init__(self, grid, x,", 
"InputPort(x - 1, y), \"b\": InputPort(x + 1, y), OUTPUT_PORT_NAME: OutputPort(x, y +", "value = math.floor(frame / rate) % mod return self._grid.key_of(value) class Delay(IOperator): def __init__(self,", "1, y, clamp=lambda x: max(1, x)), \"mod\": InputPort(x + 1, y, default=\"8\"), OUTPUT_PORT_NAME:", "force=False): a = self._grid.listen_as_value(self.ports[\"a\"]) b = self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(a * b) class North(IOperator):", "self._bang(payload) else: self._output(payload) def erase(self): self._grid.poke(self.x, self.y, DOT_GLYPH) def explode(self): self._grid.poke(self.x, self.y, BANG_GLYPH)", "super().__init__( grid, x, y, \"half\", \"Halts southward operator\", glyph=\"h\", is_passive=is_passive, ) def operation(self,", "1, self.y) port = InputPort(self.x + 1 + key % length, self.y) return", "the payload. This may modify the grid. Note: the frame is assumed to", "self.do_draw = False self.erase() class Comment(IOperator): def __init__(self, grid, x, y, *, is_passive=False):", "OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_bang=True), } ) def operation(self, frame, force=False): rate", "offset_x self.y += offset_y self._grid.poke(self.x, self.y, self.glyph) if self._grid.is_inside(self.x, self.y): self._grid.lock(self.x, self.y) @property", "1 for offset in range(length): input_port = InputPort(self.x + offset + 1, self.y)", "res = self._grid.listen(input_port) self._output(res, output_port) class Halt(IOperator): def __init__(self, grid, x, y, *,", "= random.randint(low, high) return self._grid.key_of(value) class South(IOperator): def __init__(self, grid, x, y, *,", "+ x, self.y + y) == glyph: return True return False def move(self,", "def explode(self): self._grid.poke(self.x, self.y, BANG_GLYPH) def has_neighbor(self, glyph): for x, y in ((-1,", "*, is_passive=False): super().__init__( grid, x, y, \"south\", \"Move southward or bang\", glyph=\"s\", is_passive=is_passive,", "force=False): a = 
self._grid.listen_as_value(self.ports[\"a\"]) b = self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(abs(b - a)) class Clock(IOperator):", "self.y, self.glyph) if self._grid.is_inside(self.x, self.y): self._grid.lock(self.x, self.y) @property def _output_port(self): return self.ports.get(OUTPUT_PORT_NAME) def", "y self.name = name self.description = description self.ports = {} self._grid = grid", "frame, force=False): step = self._grid.listen_as_value(self.ports[\"step\"]) mod = self._grid.listen_as_value(self.ports[\"mod\"]) out = self._grid.listen_as_value(self.ports[OUTPUT_PORT_NAME]) return self._grid.key_of((out", "grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"clock\", \"Outputs modulo of", "* b) class North(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid,", "@ (%d, %d)\", self.name, self.x, self.y ) elif glyph is None: return else:", "\"channel\": InputPort(self.x + 1, self.y), \"octave\": InputPort( self.x + 2, self.y, clamp=lambda x:", "+ 4, self.y, default=\"f\", clamp=lambda x: min(max(0, x), 16) ), \"length\": InputPort( self.x", "1, y), } ) def operation(self, frame, force=False): self._grid.lock(self._output_port.x, self._output_port.y) return self._grid.listen(self.ports[\"val\"]) class", "self.is_passive = is_passive self.do_draw = is_passive self.glyph = glyph.upper() if is_passive else glyph", "self._grid.listen(port) class West(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x,", "self.move(1, 0) self.is_passive = False class Generator(IOperator): def __init__(self, grid, x, y, *,", "+ offset, self.y + y) self.ports.update( { f\"input{offset}\": input_port, f\"output{offset}\": output_port, } )", "channel > 15: return octave = self._grid.listen_as_value(self.ports[\"octave\"]) velocity = self._grid.listen_as_value(self.ports[\"velocity\"]) length = self._grid.listen_as_value(self.ports[\"length\"])", "value = 
self._grid.listen(right_port) if value.lower() == value.upper() or value.upper() != value: return False", "(0, 1)): if self._grid.peek(self.x + x, self.y + y) == glyph: return True", "def _should_upper_case(self): output_port = self._output_port if output_port is None or not output_port.is_sensitive: return", "class Jymper(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y,", "grid, x, y, \"generator\", \"Write operands with offset\", glyph=\"g\", is_passive=is_passive, ) self.ports.update( {", "= InputPort(self.x + offset + 1, self.y) output_port = OutputPort(self.x + x +", "offset + 1, self.y) output_port = OutputPort(self.x + x + offset, self.y +", "range(length): input_port = InputPort(self.x + offset + 1, self.y) output_port = OutputPort(self.x +", "glyph=COMMENT_GLYPH, is_passive=is_passive, ) self.do_draw = False def operation(self, frame, force=False): self._grid.lock(self.x, self.y) for", "x, y, \"clock\", \"Outputs modulo of frame\", glyph=\"c\", is_passive=is_passive, ) self.ports.update( { \"rate\":", "%d): locking port @ %d, %d\", self.name, self.x, self.y, port.x, port.y, ) self._grid.lock(port.x,", "- 1, y, clamp=lambda x: max(1, x)), \"mod\": InputPort(x + 1, y, default=\"8\"),", "grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"multiply\", \"Output multiplication of", "grid, x, y, \"half\", \"Halts southward operator\", glyph=\"h\", is_passive=is_passive, ) def operation(self, frame,", "y, \"half\", \"Halts southward operator\", glyph=\"h\", is_passive=is_passive, ) def operation(self, frame, force=False): self._grid.lock(self.x,", "mod == 1 class East(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__(", "1), OUTPUT_PORT_NAME: OutputPort(x, y + 1), } ) def operation(self, frame, force=False): self._grid.lock(self._output_port.x,", "description, *, glyph=DOT_GLYPH, is_passive=False ): self.x = x self.y = y self.name =", "\"Bangs neighboring operands\", 
glyph=BANG_GLYPH, is_passive=is_passive, ) self.do_draw = False def operation(self, frame, force=False):", "default=\"1\"), \"mod\": InputPort(x + 1, y), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True), }", "= self._output_port if output_port is None or not output_port.is_sensitive: return False else: right_port", "a = self._grid.listen(self.ports[\"a\"]) b = self._grid.listen(self.ports[\"b\"]) return a == b class Increment(IOperator): def", "\"channel\", \"octave\", \"note\": if self._grid.listen(self.ports[port_name]) == DOT_GLYPH: return note = self._grid.listen(self.ports[\"note\"]) if not", "given at construction time.\"\"\" def __str__(self): return self.name def run(self, frame, force=False): payload", "def run(self, frame, force=False): payload = self.operation(frame, force) for port in self.ports.values(): if", "OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True), } ) def operation(self, frame, force=False): step", "y), OUTPUT_PORT_NAME: OutputPort(x + 1, y), } ) def operation(self, frame, force=False): self._grid.lock(self._output_port.x,", "y, \"j\", \"Outputs northward operator\", glyph=\"f\", is_passive=is_passive, ) self.ports.update( { \"val\": InputPort(x, y", "\"f\", \"G\", \"g\", \"A\", \"a\", \"B\") NOTE_TO_INDEX = {k: i for i, k", "def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"north\", \"Move", "super().__init__( grid, x, y, \"y\", \"Outputs westward operator\", glyph=\"y\", is_passive=is_passive, ) self.ports.update( {", "not force: return for port_name in \"channel\", \"octave\", \"note\": if self._grid.listen(self.ports[port_name]) == DOT_GLYPH:", "def operation(self, frame, force=False): a = self._grid.listen(self.ports[\"a\"]) b = self._grid.listen(self.ports[\"b\"]) return a ==", "y, \"add\", \"Output sum of inputs\", glyph=\"a\", is_passive=is_passive ) self.ports.update( { \"a\": InputPort(x", "line\", glyph=COMMENT_GLYPH, is_passive=is_passive, ) self.do_draw = False def 
operation(self, frame, force=False): self._grid.lock(self.x, self.y)", "self.do_draw = False def operation(self, frame, force=False): self.move(1, 0) self.is_passive = False class", "force=False): self.move(1, 0) self.is_passive = False class Generator(IOperator): def __init__(self, grid, x, y,", "Midi(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"midi\",", "glyph, port=None): if port is None: output_port = self._output_port else: output_port = port", "InputPort(x + 1, y), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_bang=True), } ) def", "x, y in ((-1, 0), (1, 0), (0, -1), (0, 1)): if self._grid.peek(self.x", "run(self, frame, force=False): payload = self.operation(frame, force) for port in self.ports.values(): if isinstance(port,", "OutputPort(x + 1, y), } ) def operation(self, frame, force=False): self._grid.lock(self._output_port.x, self._output_port.y) return", "new_y) if collider not in (BANG_GLYPH, DOT_GLYPH): self.explode() return self.erase() self.x += offset_x", "\"note\": if self._grid.listen(self.ports[port_name]) == DOT_GLYPH: return note = self._grid.listen(self.ports[\"note\"]) if not NOTE_TO_INDEX: return", "self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(index) class Substract(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__(", "x, y, \"east\", \"Move eastwards or bang\", glyph=\"e\", is_passive=is_passive, ) self.do_draw = False", "\"D\", \"d\", \"E\", \"F\", \"f\", \"G\", \"g\", \"A\", \"a\", \"B\") NOTE_TO_INDEX = {k:", "and not force: return for port_name in \"channel\", \"octave\", \"note\": if self._grid.listen(self.ports[port_name]) ==", "def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"substract\", \"Output", "import logging import math import random from orca.grid import BANG_GLYPH, COMMENT_GLYPH, DOT_GLYPH, MidiNoteOnEvent", "frame\", glyph=\"d\", is_passive=is_passive, ) self.ports.update( { 
\"rate\": InputPort(x - 1, y, clamp=lambda x:", "force=False): rate = self._grid.listen_as_value(self.ports[\"rate\"]) mod = self._grid.listen_as_value(self.ports[\"mod\"]) value = frame % (mod *", "self.ports.update( { \"step\": InputPort(x - 1, y, default=\"1\"), \"mod\": InputPort(x + 1, y),", "self.ports.update( { \"x\": InputPort(x - 3, y), \"y\": InputPort(x - 2, y), \"len\":", "= self._grid.listen(self.ports[\"b\"]) return a == b class Increment(IOperator): def __init__(self, grid, x, y,", "x, y, *, is_passive=False): super().__init__( grid, x, y, \"random\", \"Outputs random value\", glyph=\"r\",", "= self._grid.listen_as_value(self.ports[\"len\"]) x = self._grid.listen_as_value(self.ports[\"x\"]) y = self._grid.listen_as_value(self.ports[\"y\"]) + 1 for offset in", "output_port = self._output_port else: output_port = port if output_port is None: logging.warn( \"No", "abc import logging import math import random from orca.grid import BANG_GLYPH, COMMENT_GLYPH, DOT_GLYPH,", "def _output(self, glyph, port=None): if port is None: output_port = self._output_port else: output_port", "+ 1, is_bang=True), } ) def operation(self, frame, force=False): a = self._grid.listen(self.ports[\"a\"]) b", ") self._grid.lock(port.x, port.y) output_port = self._output_port if output_port: if output_port.is_bang: self._bang(payload) else: self._output(payload)", "if output_port.is_bang: self._bang(payload) else: self._output(payload) def erase(self): self._grid.poke(self.x, self.y, DOT_GLYPH) def explode(self): self._grid.poke(self.x,", "return collider = self._grid.peek(new_x, new_y) if collider not in (BANG_GLYPH, DOT_GLYPH): self.explode() return", "x, y, \"j\", \"Outputs northward operator\", glyph=\"f\", is_passive=is_passive, ) self.ports.update( { \"val\": InputPort(x,", "y, clamp=lambda x: max(1, x)), \"mod\": InputPort(x + 1, y, default=\"8\"), OUTPUT_PORT_NAME: OutputPort(x,", "= False class Jymper(IOperator): def __init__(self, grid, x, y, *, is_passive=False): 
super().__init__( grid,", "1, y, default=\"8\"), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True), } ) def operation(self,", "3, y), \"y\": InputPort(x - 2, y), \"len\": InputPort(x - 1, y, clamp=lambda", "*, is_passive=False): super().__init__( grid, x, y, \"half\", \"Halts southward operator\", glyph=\"h\", is_passive=is_passive, )", "OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True), } ) def operation(self, frame, force=False): low", "None: return else: if self._should_upper_case(): value = glyph.upper() else: value = glyph self._grid.poke(output_port.x,", "1, is_bang=True), } ) def operation(self, frame, force=False): a = self._grid.listen(self.ports[\"a\"]) b =", "port if output_port is None: logging.warn( \"No output port for operator %s @", "= self._grid.peek(new_x, new_y) if collider not in (BANG_GLYPH, DOT_GLYPH): self.explode() return self.erase() self.x", "= port if output_port is None: logging.warn( \"No output port for operator %s", "y, *, is_passive=False): super().__init__( grid, x, y, \"substract\", \"Output difference of inputs\", glyph=\"b\",", "is_passive=is_passive, ) self.do_draw = False def operation(self, frame, force=False): self.move(1, 0) self.is_passive =", "OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True), } ) def operation(self, frame, force=False): rate", "InputPort( self.x + 2, self.y, clamp=lambda x: min(max(0, x), 8) ), \"note\": InputPort(self.x", "grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"delay\", \"Bangs on module", "self._grid.key_of(index) class Substract(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x,", "operand\", glyph=\"t\", is_passive=is_passive, ) self.ports.update( { \"key\": InputPort(x - 2, y), \"len\": InputPort(x", "OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True), } ) def operation(self, frame, force=False): a", "index = self._grid.listen_as_value( self.ports[\"a\"] ) + self._grid.listen_as_value(self.ports[\"b\"]) 
return self._grid.key_of(index) class Substract(IOperator): def __init__(self,", "self.ports.update( { \"key\": InputPort(x - 2, y), \"len\": InputPort(x - 1, y, clamp=lambda", "neighboring operands\", glyph=BANG_GLYPH, is_passive=is_passive, ) self.do_draw = False def operation(self, frame, force=False): self.do_draw", "+ 1, y, default=\"8\"), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_bang=True), } ) def", "step) % (mod if mod > 0 else 36)) class Jumper(IOperator): def __init__(self,", "force=False): self.do_draw = False self.erase() class Comment(IOperator): def __init__(self, grid, x, y, *,", "def operation(self, frame, force=False): self._grid.lock(self._output_port.x, self._output_port.y) return self._grid.listen(self.ports[\"val\"]) class Multiply(IOperator): def __init__(self, grid,", "x, y, *, is_passive=False): super().__init__( grid, x, y, \"add\", \"Output sum of inputs\",", "force=False): a = self._grid.listen(self.ports[\"a\"]) b = self._grid.listen(self.ports[\"b\"]) return a == b class Increment(IOperator):", "+ 1, is_sensitive=True), } ) def operation(self, frame, force=False): a = self._grid.listen_as_value(self.ports[\"a\"]) b", "\"mod\": InputPort(x + 1, y, default=\"8\"), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_bang=True), }", "are equal\", glyph=\"f\", is_passive=is_passive, ) self.ports.update( { \"a\": InputPort(x - 1, y), \"b\":", ") def operation(self, frame, force=False): if not self.has_neighbor(BANG_GLYPH) and not force: return for", "value\", glyph=\"r\", is_passive=is_passive, ) self.ports.update( { \"min\": InputPort(x - 1, y), \"max\": InputPort(x", ") def operation(self, frame, force=False): self._grid.lock(self._output_port.x, self._output_port.y) return self._grid.listen(self.ports[\"val\"]) class Multiply(IOperator): def __init__(self,", "output_port = OutputPort(self.x + x + offset, self.y + y) self.ports.update( { f\"input{offset}\":", "is_passive self.glyph = glyph.upper() if is_passive else glyph @abc.abstractmethod def 
operation(self, frame, force=False):", "self._grid.listen_as_value(self.ports[\"step\"]) mod = self._grid.listen_as_value(self.ports[\"mod\"]) out = self._grid.listen_as_value(self.ports[OUTPUT_PORT_NAME]) return self._grid.key_of((out + step) % (mod", "is_passive=False): super().__init__( grid, x, y, \"random\", \"Outputs random value\", glyph=\"r\", is_passive=is_passive, ) self.ports.update(", "== b class Increment(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid,", "i, k in enumerate(_NOTES_VALUES)} class Midi(IOperator): def __init__(self, grid, x, y, *, is_passive=False):", "orca.grid import BANG_GLYPH, COMMENT_GLYPH, DOT_GLYPH, MidiNoteOnEvent from orca.ports import InputPort, OutputPort logger =", "frame, force=False): index = self._grid.listen_as_value( self.ports[\"a\"] ) + self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(index) class Substract(IOperator):", "\"east\", \"Move eastwards or bang\", glyph=\"e\", is_passive=is_passive, ) self.do_draw = False def operation(self,", "False def move(self, offset_x, offset_y): new_x = self.x + offset_x new_y = self.y", "@property def _output_port(self): return self.ports.get(OUTPUT_PORT_NAME) def _has_output_port(self): return OUTPUT_PORT_NAME in self.ports def _should_upper_case(self):", "class Substract(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y,", "def __init__( self, grid, x, y, name, description, *, glyph=DOT_GLYPH, is_passive=False ): self.x", "if not self.has_neighbor(BANG_GLYPH) and not force: return for port_name in \"channel\", \"octave\", \"note\":", "\"x\": InputPort(x - 3, y), \"y\": InputPort(x - 2, y), \"len\": InputPort(x -", "x: max(1, x)), OUTPUT_PORT_NAME: OutputPort(x, y + 1), } ) def operation(self, frame,", "operation(self, frame, force=False): self._grid.lock(self._output_port.x, self._output_port.y) return self._grid.listen(self.ports[\"val\"]) class Bang(IOperator): def 
__init__(self, grid, x,", "MIDI note\", glyph=\":\", is_passive=True, ) self.ports.update( { \"channel\": InputPort(self.x + 1, self.y), \"octave\":", "import BANG_GLYPH, COMMENT_GLYPH, DOT_GLYPH, MidiNoteOnEvent from orca.ports import InputPort, OutputPort logger = logging.getLogger(__name__)", "frame % (mod * rate) return value == 0 or mod == 1", "octave = self._grid.listen_as_value(self.ports[\"octave\"]) velocity = self._grid.listen_as_value(self.ports[\"velocity\"]) length = self._grid.listen_as_value(self.ports[\"length\"]) self._grid.push_midi(MidiNoteOnEvent(channel, octave, note, velocity,", "+ 1, self.y) output_port = OutputPort(self.x + x + offset, self.y + y)", "= glyph.upper() else: value = glyph self._grid.poke(output_port.x, output_port.y, value) class Add(IOperator): def __init__(self,", "+ 1, is_sensitive=True), } ) def operation(self, frame, force=False): index = self._grid.listen_as_value( self.ports[\"a\"]", "westward or bang\", glyph=\"w\", is_passive=is_passive, ) self.do_draw = False def operation(self, frame, force=False):", "= self._grid.listen(self.ports[\"note\"]) if not NOTE_TO_INDEX: return channel = self._grid.listen_as_value(self.ports[\"channel\"]) if channel > 15:", "True def _bang(self, payload): output_port = self._output_port if output_port is None: logger.warn(\"Trying to", "is_sensitive=True), } ) def operation(self, frame, force=False): a = self._grid.listen_as_value(self.ports[\"a\"]) b = self._grid.listen_as_value(self.ports[\"b\"])", "frame, force=False): self.move(0, 1) self.is_passive = False class Track(IOperator): def __init__(self, grid, x,", "self._output(res, output_port) class Halt(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid,", "to bang, but no output port.\") return else: glyph = BANG_GLYPH if payload", "self._grid.listen_as_value(self.ports[\"len\"]) for offset in range(length): self._grid.lock(self.x + offset + 1, self.y) port =", "1, is_bang=True), } ) def 
operation(self, frame, force=False): rate = self._grid.listen_as_value(self.ports[\"rate\"]) mod =", "force=False): rate = self._grid.listen_as_value(self.ports[\"rate\"]) mod = self._grid.listen_as_value(self.ports[\"mod\"]) value = math.floor(frame / rate) %", "__init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"midi\", \"Send MIDI", "glyph @abc.abstractmethod def operation(self, frame, force=False): \"\"\"Run the operator for the given frame", "= self._grid.listen_as_value(self.ports[\"rate\"]) mod = self._grid.listen_as_value(self.ports[\"mod\"]) value = math.floor(frame / rate) % mod return", "move(self, offset_x, offset_y): new_x = self.x + offset_x new_y = self.y + offset_y", "glyph=\"a\", is_passive=is_passive ) self.ports.update( { \"a\": InputPort(x - 1, y), \"b\": InputPort(x +", "self.y = y self.name = name self.description = description self.ports = {} self._grid", "frame, force=False): rate = self._grid.listen_as_value(self.ports[\"rate\"]) mod = self._grid.listen_as_value(self.ports[\"mod\"]) value = frame % (mod", "is_passive=False): super().__init__( grid, x, y, \"north\", \"Move northward or bang\", glyph=\"n\", is_passive=is_passive, )", "y), \"b\": InputPort(x + 1, y), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True), }", "self._grid.poke(self.x, self.y, BANG_GLYPH) def has_neighbor(self, glyph): for x, y in ((-1, 0), (1,", "westward operator\", glyph=\"y\", is_passive=is_passive, ) self.ports.update( { \"val\": InputPort(x - 1, y), OUTPUT_PORT_NAME:", "+ 1, y), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_bang=True), } ) def operation(self,", "BANG_GLYPH if payload else DOT_GLYPH self._grid.poke(output_port.x, output_port.y, glyph) def _output(self, glyph, port=None): if", "glyph=\"e\", is_passive=is_passive, ) self.do_draw = False def operation(self, frame, force=False): self.move(1, 0) self.is_passive", "b class Increment(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( 
grid, x,", "1, self.y) value = self._grid.listen(right_port) if value.lower() == value.upper() or value.upper() != value:", "y), \"len\": InputPort(x - 1, y, clamp=lambda x: max(x, 1)), } ) def", "self._grid.lock(self._output_port.x, self._output_port.y) return self._grid.listen(self.ports[\"val\"]) class Multiply(IOperator): def __init__(self, grid, x, y, *, is_passive=False):", "rate) return value == 0 or mod == 1 class East(IOperator): def __init__(self,", "x, y, \"random\", \"Outputs random value\", glyph=\"r\", is_passive=is_passive, ) self.ports.update( { \"min\": InputPort(x", "= self._grid.listen_as_value(self.ports[\"channel\"]) if channel > 15: return octave = self._grid.listen_as_value(self.ports[\"octave\"]) velocity = self._grid.listen_as_value(self.ports[\"velocity\"])", "inputs\", glyph=\"a\", is_passive=is_passive ) self.ports.update( { \"a\": InputPort(x - 1, y), \"b\": InputPort(x", "frame\", glyph=\"c\", is_passive=is_passive, ) self.ports.update( { \"rate\": InputPort(x - 1, y, clamp=lambda x:", "\"Output sum of inputs\", glyph=\"a\", is_passive=is_passive ) self.ports.update( { \"a\": InputPort(x - 1,", "port for operator %s @ (%d, %d)\", self.name, self.x, self.y ) elif glyph", "self.glyph) if self._grid.is_inside(self.x, self.y): self._grid.lock(self.x, self.y) @property def _output_port(self): return self.ports.get(OUTPUT_PORT_NAME) def _has_output_port(self):", "is_passive self.do_draw = is_passive self.glyph = glyph.upper() if is_passive else glyph @abc.abstractmethod def", "y, default=\"1\"), \"mod\": InputPort(x + 1, y), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True),", "OutputPort(x, y + 1, is_sensitive=True), } ) def operation(self, frame, force=False): rate =", "+ 1, y), } ) def operation(self, frame, force=False): self._grid.lock(self._output_port.x, self._output_port.y) return self._grid.listen(self.ports[\"val\"])", "offset in range(length): input_port = InputPort(self.x + offset + 1, self.y) output_port =", 
"__init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"bang\", \"Bangs neighboring", "y, *, is_passive=False): super().__init__( grid, x, y, \"east\", \"Move eastwards or bang\", glyph=\"e\",", "(%d, %d): locking port @ %d, %d\", self.name, self.x, self.y, port.x, port.y, )", "+ 1, self._grid.cols): self._grid.lock(x, self.y) if self._grid.peek(x, self.y) == self.glyph: break _NOTES_VALUES =", "sum of inputs\", glyph=\"a\", is_passive=is_passive ) self.ports.update( { \"a\": InputPort(x - 1, y),", "self._grid.key_of(abs(b - a)) class Clock(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__(", ") self.ports.update( { \"rate\": InputPort(x - 1, y, clamp=lambda x: max(1, x)), \"mod\":", "inputs are equal\", glyph=\"f\", is_passive=is_passive, ) self.ports.update( { \"a\": InputPort(x - 1, y),", "x, self.y + y) == glyph: return True return False def move(self, offset_x,", "} ) def operation(self, frame, force=False): key = self._grid.listen_as_value(self.ports[\"key\"]) length = self._grid.listen_as_value(self.ports[\"len\"]) for", "force=False): step = self._grid.listen_as_value(self.ports[\"step\"]) mod = self._grid.listen_as_value(self.ports[\"mod\"]) out = self._grid.listen_as_value(self.ports[OUTPUT_PORT_NAME]) return self._grid.key_of((out +", "output_port) class Halt(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x,", "self._grid.lock(self.x, self.y) for x in range(self.x + 1, self._grid.cols): self._grid.lock(x, self.y) if self._grid.peek(x,", "super().__init__( grid, x, y, \"delay\", \"Bangs on module of frame\", glyph=\"d\", is_passive=is_passive, )", "super().__init__( grid, x, y, \"comment\", \"Halts line\", glyph=COMMENT_GLYPH, is_passive=is_passive, ) self.do_draw = False", "super().__init__( grid, x, y, \"bang\", \"Bangs neighboring operands\", glyph=BANG_GLYPH, is_passive=is_passive, ) self.do_draw =", "length = 
self._grid.listen_as_value(self.ports[\"len\"]) for offset in range(length): self._grid.lock(self.x + offset + 1, self.y)", "operation(self, frame, force=False): rate = self._grid.listen_as_value(self.ports[\"rate\"]) mod = self._grid.listen_as_value(self.ports[\"mod\"]) value = frame %", "operation(self, frame, force=False): a = self._grid.listen_as_value(self.ports[\"a\"]) b = self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(a * b)", "def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"if\", \"Bang", "for port_name in \"channel\", \"octave\", \"note\": if self._grid.listen(self.ports[port_name]) == DOT_GLYPH: return note =", "self.ports.update( { \"rate\": InputPort(x - 1, y, clamp=lambda x: max(1, x)), \"mod\": InputPort(x", ") self.do_draw = False def operation(self, frame, force=False): self.do_draw = False self.erase() class", "x, y, \"generator\", \"Write operands with offset\", glyph=\"g\", is_passive=is_passive, ) self.ports.update( { \"x\":", "{ \"key\": InputPort(x - 2, y), \"len\": InputPort(x - 1, y, clamp=lambda x:", "- 1, y), \"b\": InputPort(x + 1, y), OUTPUT_PORT_NAME: OutputPort(x, y + 1,", "force=False): index = self._grid.listen_as_value( self.ports[\"a\"] ) + self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(index) class Substract(IOperator): def", "\"velocity\": InputPort( self.x + 4, self.y, default=\"f\", clamp=lambda x: min(max(0, x), 16) ),", ") def operation(self, frame, force=False): a = self._grid.listen_as_value(self.ports[\"a\"]) b = self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(abs(b", "\"g\", \"A\", \"a\", \"B\") NOTE_TO_INDEX = {k: i for i, k in enumerate(_NOTES_VALUES)}", "> 0 else 36)) class Jumper(IOperator): def __init__(self, grid, x, y, *, is_passive=False):", "grid, x, y, \"delay\", \"Bangs on module of frame\", glyph=\"d\", is_passive=is_passive, ) self.ports.update(", "{} self._grid = grid self.is_passive = is_passive 
self.do_draw = is_passive self.glyph = glyph.upper()", "length, self.y) return self._grid.listen(port) class West(IOperator): def __init__(self, grid, x, y, *, is_passive=False):", "collider = self._grid.peek(new_x, new_y) if collider not in (BANG_GLYPH, DOT_GLYPH): self.explode() return self.erase()", "- 2, y), \"len\": InputPort(x - 1, y, clamp=lambda x: max(1, x)), OUTPUT_PORT_NAME:", "self._output_port.y) return self._grid.listen(self.ports[\"val\"]) class Multiply(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__(", "1, y), \"b\": InputPort(x + 1, y), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True),", "False else: right_port = InputPort(self.x + 1, self.y) value = self._grid.listen(right_port) if value.lower()", "\"Move northward or bang\", glyph=\"n\", is_passive=is_passive, ) self.do_draw = False def operation(self, frame,", "mod return self._grid.key_of(value) class Delay(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__(", "\"bang\", \"Bangs neighboring operands\", glyph=BANG_GLYPH, is_passive=is_passive, ) self.do_draw = False def operation(self, frame,", "a = self._grid.listen_as_value(self.ports[\"a\"]) b = self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(abs(b - a)) class Clock(IOperator): def", "InputPort(x - 1, y, clamp=lambda x: max(x, 1)), } ) def operation(self, frame,", "= self._grid.listen(self.ports[\"a\"]) b = self._grid.listen(self.ports[\"b\"]) return a == b class Increment(IOperator): def __init__(self,", "\"E\", \"F\", \"f\", \"G\", \"g\", \"A\", \"a\", \"B\") NOTE_TO_INDEX = {k: i for", "glyph=\"g\", is_passive=is_passive, ) self.ports.update( { \"x\": InputPort(x - 3, y), \"y\": InputPort(x -", "clamp=lambda x: min(max(0, x), 16) ), \"length\": InputPort( self.x + 5, self.y, clamp=lambda", "= y self.name = name self.description = description self.ports = {} self._grid =", "self.do_draw = False def operation(self, frame, force=False): 
self.move(0, -1) self.is_passive = False class", "time.\"\"\" def __str__(self): return self.name def run(self, frame, force=False): payload = self.operation(frame, force)", "y + 1), } ) def operation(self, frame, force=False): self._grid.lock(self._output_port.x, self._output_port.y) return self._grid.listen(self.ports[\"val\"])", "x, y, *, is_passive=False): super().__init__( grid, x, y, \"east\", \"Move eastwards or bang\",", "mod > 0 else 36)) class Jumper(IOperator): def __init__(self, grid, x, y, *,", "False class Jymper(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x,", "= grid self.is_passive = is_passive self.do_draw = is_passive self.glyph = glyph.upper() if is_passive", "x, y, \"increment\", \"Increment operator southward\", glyph=\"i\", is_passive=is_passive, ) self.ports.update( { \"step\": InputPort(x", "1, y), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True), } ) def operation(self, frame,", "multiplication of inputs\", glyph=\"m\", is_passive=is_passive, ) self.ports.update( { \"a\": InputPort(x - 1, y),", "def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"add\", \"Output", "is_passive=is_passive ) self.ports.update( { \"a\": InputPort(x - 1, y), \"b\": InputPort(x + 1,", "x, y, \"substract\", \"Output difference of inputs\", glyph=\"b\", is_passive=is_passive, ) self.ports.update( { \"a\":", "x, y, \"multiply\", \"Output multiplication of inputs\", glyph=\"m\", is_passive=is_passive, ) self.ports.update( { \"a\":", "1, y), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_bang=True), } ) def operation(self, frame,", "grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"y\", \"Outputs westward operator\",", "- 1, y), OUTPUT_PORT_NAME: OutputPort(x + 1, y), } ) def operation(self, frame,", "self.ports.update( { \"a\": InputPort(x - 1, y), \"b\": InputPort(x + 1, y), OUTPUT_PORT_NAME:", "OutputPort(x, y + 1, is_bang=True), } ) def operation(self, frame, 
force=False): a =", "self.y, DOT_GLYPH) def explode(self): self._grid.poke(self.x, self.y, BANG_GLYPH) def has_neighbor(self, glyph): for x, y", "x), 16) ), \"length\": InputPort( self.x + 5, self.y, clamp=lambda x: min(max(0, x),", "= self._output_port if output_port is None: logger.warn(\"Trying to bang, but no output port.\")", "value: return False else: return True def _bang(self, payload): output_port = self._output_port if", "self.y + y) == glyph: return True return False def move(self, offset_x, offset_y):", "self.erase() self.x += offset_x self.y += offset_y self._grid.poke(self.x, self.y, self.glyph) if self._grid.is_inside(self.x, self.y):", "*, is_passive=False): super().__init__( grid, x, y, \"east\", \"Move eastwards or bang\", glyph=\"e\", is_passive=is_passive,", "= False def operation(self, frame, force=False): self.move(0, -1) self.is_passive = False class Random(IOperator):", "def operation(self, frame, force=False): index = self._grid.listen_as_value( self.ports[\"a\"] ) + self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(index)", "y, *, is_passive=False): super().__init__( grid, x, y, \"add\", \"Output sum of inputs\", glyph=\"a\",", "self._grid.cols): self._grid.lock(x, self.y) if self._grid.peek(x, self.y) == self.glyph: break _NOTES_VALUES = (\"C\", \"c\",", "= self._grid.listen_as_value(self.ports[\"rate\"]) mod = self._grid.listen_as_value(self.ports[\"mod\"]) value = frame % (mod * rate) return", "for offset in range(length): input_port = InputPort(self.x + offset + 1, self.y) output_port", "16) ), \"length\": InputPort( self.x + 5, self.y, clamp=lambda x: min(max(0, x), 32)", "\"j\", \"Outputs northward operator\", glyph=\"f\", is_passive=is_passive, ) self.ports.update( { \"val\": InputPort(x, y -", "_has_output_port(self): return OUTPUT_PORT_NAME in self.ports def _should_upper_case(self): output_port = self._output_port if output_port is", "x, y, *, is_passive=False): super().__init__( grid, x, y, \"midi\", \"Send 
MIDI note\", glyph=\":\",", "__str__(self): return self.name def run(self, frame, force=False): payload = self.operation(frame, force) for port", "frame, force=False): rate = self._grid.listen_as_value(self.ports[\"rate\"]) mod = self._grid.listen_as_value(self.ports[\"mod\"]) value = math.floor(frame / rate)", "return the payload. This may modify the grid. Note: the frame is assumed", "is_passive=False): super().__init__( grid, x, y, \"west\", \"Move westward or bang\", glyph=\"w\", is_passive=is_passive, )", "default=\"f\", clamp=lambda x: min(max(0, x), 16) ), \"length\": InputPort( self.x + 5, self.y,", "glyph: return True return False def move(self, offset_x, offset_y): new_x = self.x +", "1 class East(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x,", "+ 1, self.y) port = InputPort(self.x + 1 + key % length, self.y)", "__init__( self, grid, x, y, name, description, *, glyph=DOT_GLYPH, is_passive=False ): self.x =", "is_passive=False): super().__init__( grid, x, y, \"east\", \"Move eastwards or bang\", glyph=\"e\", is_passive=is_passive, )", "__init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"j\", \"Outputs northward", "y, *, is_passive=False): super().__init__( grid, x, y, \"comment\", \"Halts line\", glyph=COMMENT_GLYPH, is_passive=is_passive, )", "\"north\", \"Move northward or bang\", glyph=\"n\", is_passive=is_passive, ) self.do_draw = False def operation(self,", "InputPort( self.x + 4, self.y, default=\"f\", clamp=lambda x: min(max(0, x), 16) ), \"length\":", "\"Output difference of inputs\", glyph=\"b\", is_passive=is_passive, ) self.ports.update( { \"a\": InputPort(x - 1,", "offset_y if not self._grid.is_inside(new_x, new_y): self.explode() return collider = self._grid.peek(new_x, new_y) if collider", "default=\"8\"), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True), } ) def operation(self, frame, force=False):", "force=False): self._grid.lock(self.x, self.y + 1) # 
self._output_port.x, self._output_port.y) class If(IOperator): def __init__(self, grid,", "__init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"north\", \"Move northward", "False def operation(self, frame, force=False): self.move(0, 1) self.is_passive = False class Track(IOperator): def", "class North(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y,", "not output_port.is_sensitive: return False else: right_port = InputPort(self.x + 1, self.y) value =", "\"track\", \"Reads eastward operand\", glyph=\"t\", is_passive=is_passive, ) self.ports.update( { \"key\": InputPort(x - 2,", "modulo of frame\", glyph=\"c\", is_passive=is_passive, ) self.ports.update( { \"rate\": InputPort(x - 1, y,", "False class Track(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x,", "\"if\", \"Bang if inputs are equal\", glyph=\"f\", is_passive=is_passive, ) self.ports.update( { \"a\": InputPort(x", "+ key % length, self.y) return self._grid.listen(port) class West(IOperator): def __init__(self, grid, x,", "erase(self): self._grid.poke(self.x, self.y, DOT_GLYPH) def explode(self): self._grid.poke(self.x, self.y, BANG_GLYPH) def has_neighbor(self, glyph): for", "self._output_port if output_port: if output_port.is_bang: self._bang(payload) else: self._output(payload) def erase(self): self._grid.poke(self.x, self.y, DOT_GLYPH)", "state of the grid given at construction time.\"\"\" def __str__(self): return self.name def", "mod = self._grid.listen_as_value(self.ports[\"mod\"]) value = math.floor(frame / rate) % mod return self._grid.key_of(value) class", "super().__init__( grid, x, y, \"multiply\", \"Output multiplication of inputs\", glyph=\"m\", is_passive=is_passive, ) self.ports.update(", "offset_y self._grid.poke(self.x, self.y, self.glyph) if self._grid.is_inside(self.x, self.y): self._grid.lock(self.x, self.y) @property def _output_port(self): return", "grid, x, y, *, 
is_passive=False): super().__init__( grid, x, y, \"random\", \"Outputs random value\",", "__init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"generator\", \"Write operands", "= self._grid.listen_as_value(self.ports[\"min\"]) high = self._grid.listen_as_value(self.ports[\"max\"]) value = random.randint(low, high) return self._grid.key_of(value) class South(IOperator):", "self.y) output_port = OutputPort(self.x + x + offset, self.y + y) self.ports.update( {", "the frame is assumed to match the state of the grid given at", "def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"comment\", \"Halts", "{ \"channel\": InputPort(self.x + 1, self.y), \"octave\": InputPort( self.x + 2, self.y, clamp=lambda", "InputPort( self.x + 5, self.y, clamp=lambda x: min(max(0, x), 32) ), } )", "import math import random from orca.grid import BANG_GLYPH, COMMENT_GLYPH, DOT_GLYPH, MidiNoteOnEvent from orca.ports", "key % length, self.y) return self._grid.listen(port) class West(IOperator): def __init__(self, grid, x, y,", "self.ports.update( { \"channel\": InputPort(self.x + 1, self.y), \"octave\": InputPort( self.x + 2, self.y,", "max(x, 1)), } ) def operation(self, frame, force=False): length = self._grid.listen_as_value(self.ports[\"len\"]) x =", "self.is_passive = False class Track(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__(", "offset, self.y + y) self.ports.update( { f\"input{offset}\": input_port, f\"output{offset}\": output_port, } ) res", "\"c\", \"D\", \"d\", \"E\", \"F\", \"f\", \"G\", \"g\", \"A\", \"a\", \"B\") NOTE_TO_INDEX =", "self.y + y) self.ports.update( { f\"input{offset}\": input_port, f\"output{offset}\": output_port, } ) res =", "self.has_neighbor(BANG_GLYPH) and not force: return for port_name in \"channel\", \"octave\", \"note\": if self._grid.listen(self.ports[port_name])", "= OutputPort(self.x + x + offset, self.y + y) self.ports.update( { f\"input{offset}\": 
input_port,", "def operation(self, frame, force=False): self._grid.lock(self.x, self.y + 1) # self._output_port.x, self._output_port.y) class If(IOperator):", "glyph=\"w\", is_passive=is_passive, ) self.do_draw = False def operation(self, frame, force=False): self.move(-1, 0) self.is_passive", "glyph) def _output(self, glyph, port=None): if port is None: output_port = self._output_port else:", "\"A\", \"a\", \"B\") NOTE_TO_INDEX = {k: i for i, k in enumerate(_NOTES_VALUES)} class", "__init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"random\", \"Outputs random", "\"Move westward or bang\", glyph=\"w\", is_passive=is_passive, ) self.do_draw = False def operation(self, frame,", "logger.debug( \"Ops %s (%d, %d): locking port @ %d, %d\", self.name, self.x, self.y,", "difference of inputs\", glyph=\"b\", is_passive=is_passive, ) self.ports.update( { \"a\": InputPort(x - 1, y),", "else: glyph = BANG_GLYPH if payload else DOT_GLYPH self._grid.poke(output_port.x, output_port.y, glyph) def _output(self,", "\"Halts line\", glyph=COMMENT_GLYPH, is_passive=is_passive, ) self.do_draw = False def operation(self, frame, force=False): self._grid.lock(self.x,", "force=False): if not self.has_neighbor(BANG_GLYPH) and not force: return for port_name in \"channel\", \"octave\",", ") def operation(self, frame, force=False): a = self._grid.listen(self.ports[\"a\"]) b = self._grid.listen(self.ports[\"b\"]) return a", "grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"add\", \"Output sum of", "y + 1, is_sensitive=True), } ) def operation(self, frame, force=False): index = self._grid.listen_as_value(", "default=\"8\"), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_bang=True), } ) def operation(self, frame, force=False):", "x), 32) ), } ) def operation(self, frame, force=False): if not self.has_neighbor(BANG_GLYPH) and", "note = self._grid.listen(self.ports[\"note\"]) if not NOTE_TO_INDEX: return channel = 
self._grid.listen_as_value(self.ports[\"channel\"]) if channel >", "for offset in range(length): self._grid.lock(self.x + offset + 1, self.y) port = InputPort(self.x", "0 else 36)) class Jumper(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__(", "else: return True def _bang(self, payload): output_port = self._output_port if output_port is None:", "with offset\", glyph=\"g\", is_passive=is_passive, ) self.ports.update( { \"x\": InputPort(x - 3, y), \"y\":", "\"mod\": InputPort(x + 1, y), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True), } )", "== self.glyph: break _NOTES_VALUES = (\"C\", \"c\", \"D\", \"d\", \"E\", \"F\", \"f\", \"G\",", "a == b class Increment(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__(", "force=False): length = self._grid.listen_as_value(self.ports[\"len\"]) x = self._grid.listen_as_value(self.ports[\"x\"]) y = self._grid.listen_as_value(self.ports[\"y\"]) + 1 for", "grid, x, y, \"south\", \"Move southward or bang\", glyph=\"s\", is_passive=is_passive, ) self.do_draw =", "clamp=lambda x: max(1, x)), OUTPUT_PORT_NAME: OutputPort(x, y + 1), } ) def operation(self,", "+ 2, self.y, clamp=lambda x: min(max(0, x), 8) ), \"note\": InputPort(self.x + 3,", "self._grid.poke(output_port.x, output_port.y, value) class Add(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__(", "\"Reads eastward operand\", glyph=\"t\", is_passive=is_passive, ) self.ports.update( { \"key\": InputPort(x - 2, y),", "b) class North(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x,", "OUTPUT_PORT_NAME: OutputPort(x + 1, y), } ) def operation(self, frame, force=False): self._grid.lock(self._output_port.x, self._output_port.y)", "logging.warn( \"No output port for operator %s @ (%d, %d)\", self.name, self.x, self.y", "BANG_GLYPH) def has_neighbor(self, glyph): for x, y in ((-1, 0), (1, 0), (0,", "output_port = port if output_port is 
None: logging.warn( \"No output port for operator", "*, glyph=DOT_GLYPH, is_passive=False ): self.x = x self.y = y self.name = name", "super().__init__( grid, x, y, \"midi\", \"Send MIDI note\", glyph=\":\", is_passive=True, ) self.ports.update( {", "Delay(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"delay\",", "{ \"a\": InputPort(x - 1, y), \"b\": InputPort(x + 1, y), OUTPUT_PORT_NAME: OutputPort(x,", "(\"C\", \"c\", \"D\", \"d\", \"E\", \"F\", \"f\", \"G\", \"g\", \"A\", \"a\", \"B\") NOTE_TO_INDEX", "bang\", glyph=\"s\", is_passive=is_passive, ) self.do_draw = False def operation(self, frame, force=False): self.move(0, 1)", "self.name def run(self, frame, force=False): payload = self.operation(frame, force) for port in self.ports.values():", "bang\", glyph=\"e\", is_passive=is_passive, ) self.do_draw = False def operation(self, frame, force=False): self.move(1, 0)", "is_passive=is_passive, ) self.do_draw = False def operation(self, frame, force=False): self.move(0, 1) self.is_passive =", "\"Write operands with offset\", glyph=\"g\", is_passive=is_passive, ) self.ports.update( { \"x\": InputPort(x - 3,", "x, y, \"bang\", \"Bangs neighboring operands\", glyph=BANG_GLYPH, is_passive=is_passive, ) self.do_draw = False def", "= False def operation(self, frame, force=False): self.do_draw = False self.erase() class Comment(IOperator): def", "North(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"north\",", "self.y) port = InputPort(self.x + 1 + key % length, self.y) return self._grid.listen(port)", "False self.erase() class Comment(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid,", "is_sensitive=True), } ) def operation(self, frame, force=False): step = self._grid.listen_as_value(self.ports[\"step\"]) mod = self._grid.listen_as_value(self.ports[\"mod\"])", "4, self.y, default=\"f\", clamp=lambda x: min(max(0, x), 16) ), \"length\": 
InputPort( self.x +", "import InputPort, OutputPort logger = logging.getLogger(__name__) OUTPUT_PORT_NAME = \"output\" class IOperator(abc.ABC): def __init__(", "self._grid.listen(self.ports[\"val\"]) class Bang(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x,", "*, is_passive=False): super().__init__( grid, x, y, \"track\", \"Reads eastward operand\", glyph=\"t\", is_passive=is_passive, )", "class West(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y,", "OutputPort(x, y + 1, is_bang=True), } ) def operation(self, frame, force=False): rate =", "self.y) @property def _output_port(self): return self.ports.get(OUTPUT_PORT_NAME) def _has_output_port(self): return OUTPUT_PORT_NAME in self.ports def", "+ 1, is_bang=True), } ) def operation(self, frame, force=False): rate = self._grid.listen_as_value(self.ports[\"rate\"]) mod", "operation(self, frame, force=False): a = self._grid.listen(self.ports[\"a\"]) b = self._grid.listen(self.ports[\"b\"]) return a == b", "*, is_passive=False): super().__init__( grid, x, y, \"substract\", \"Output difference of inputs\", glyph=\"b\", is_passive=is_passive,", "if payload else DOT_GLYPH self._grid.poke(output_port.x, output_port.y, glyph) def _output(self, glyph, port=None): if port", "15: return octave = self._grid.listen_as_value(self.ports[\"octave\"]) velocity = self._grid.listen_as_value(self.ports[\"velocity\"]) length = self._grid.listen_as_value(self.ports[\"length\"]) self._grid.push_midi(MidiNoteOnEvent(channel, octave,", "self._grid.is_inside(self.x, self.y): self._grid.lock(self.x, self.y) @property def _output_port(self): return self.ports.get(OUTPUT_PORT_NAME) def _has_output_port(self): return OUTPUT_PORT_NAME", "frame and return the payload. This may modify the grid. 
Note: the frame", "not self.has_neighbor(BANG_GLYPH) and not force: return for port_name in \"channel\", \"octave\", \"note\": if", "= frame % (mod * rate) return value == 0 or mod ==", "36)) class Jumper(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x,", "self.do_draw = False def operation(self, frame, force=False): self._grid.lock(self.x, self.y) for x in range(self.x", "__init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"clock\", \"Outputs modulo", "is_passive=False): super().__init__( grid, x, y, \"half\", \"Halts southward operator\", glyph=\"h\", is_passive=is_passive, ) def", "\"multiply\", \"Output multiplication of inputs\", glyph=\"m\", is_passive=is_passive, ) self.ports.update( { \"a\": InputPort(x -", "False def operation(self, frame, force=False): self.move(1, 0) self.is_passive = False class Generator(IOperator): def", "InputPort(self.x + 1, self.y) value = self._grid.listen(right_port) if value.lower() == value.upper() or value.upper()", "Increment(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"increment\",", "InputPort(x - 3, y), \"y\": InputPort(x - 2, y), \"len\": InputPort(x - 1,", "0), (0, -1), (0, 1)): if self._grid.peek(self.x + x, self.y + y) ==", "if self._grid.peek(x, self.y) == self.glyph: break _NOTES_VALUES = (\"C\", \"c\", \"D\", \"d\", \"E\",", "self._grid.listen_as_value(self.ports[\"a\"]) b = self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(abs(b - a)) class Clock(IOperator): def __init__(self, grid,", "self.glyph: break _NOTES_VALUES = (\"C\", \"c\", \"D\", \"d\", \"E\", \"F\", \"f\", \"G\", \"g\",", "else: right_port = InputPort(self.x + 1, self.y) value = self._grid.listen(right_port) if value.lower() ==", ") elif glyph is None: return else: if self._should_upper_case(): value = glyph.upper() else:", ") self.ports.update( { \"a\": InputPort(x - 1, y), \"b\": InputPort(x + 1, y),", "logger 
= logging.getLogger(__name__) OUTPUT_PORT_NAME = \"output\" class IOperator(abc.ABC): def __init__( self, grid, x,", "self.name = name self.description = description self.ports = {} self._grid = grid self.is_passive", "%s (%d, %d): locking port @ %d, %d\", self.name, self.x, self.y, port.x, port.y,", "else glyph @abc.abstractmethod def operation(self, frame, force=False): \"\"\"Run the operator for the given", "West(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"west\",", "self.move(0, 1) self.is_passive = False class Track(IOperator): def __init__(self, grid, x, y, *,", "_should_upper_case(self): output_port = self._output_port if output_port is None or not output_port.is_sensitive: return False", "to match the state of the grid given at construction time.\"\"\" def __str__(self):", "Bang(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"bang\",", "self.name, self.x, self.y, port.x, port.y, ) self._grid.lock(port.x, port.y) output_port = self._output_port if output_port:", "8) ), \"note\": InputPort(self.x + 3, self.y), \"velocity\": InputPort( self.x + 4, self.y,", "grid, x, y, name, description, *, glyph=DOT_GLYPH, is_passive=False ): self.x = x self.y", "1, y), \"max\": InputPort(x + 1, y), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True),", "value.lower() == value.upper() or value.upper() != value: return False else: return True def", "is_passive=is_passive, ) self.ports.update( { \"val\": InputPort(x - 1, y), OUTPUT_PORT_NAME: OutputPort(x + 1,", "2, self.y, clamp=lambda x: min(max(0, x), 8) ), \"note\": InputPort(self.x + 3, self.y),", "glyph=\"y\", is_passive=is_passive, ) self.ports.update( { \"val\": InputPort(x - 1, y), OUTPUT_PORT_NAME: OutputPort(x +", "def operation(self, frame, force=False): rate = self._grid.listen_as_value(self.ports[\"rate\"]) mod = self._grid.listen_as_value(self.ports[\"mod\"]) value = frame", "OutputPort(x, y + 1, 
is_sensitive=True), } ) def operation(self, frame, force=False): a =", "} ) def operation(self, frame, force=False): length = self._grid.listen_as_value(self.ports[\"len\"]) x = self._grid.listen_as_value(self.ports[\"x\"]) y", "+ 1, is_sensitive=True), } ) def operation(self, frame, force=False): low = self._grid.listen_as_value(self.ports[\"min\"]) high", "super().__init__( grid, x, y, \"track\", \"Reads eastward operand\", glyph=\"t\", is_passive=is_passive, ) self.ports.update( {", "# self._output_port.x, self._output_port.y) class If(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__(", "operation(self, frame, force=False): a = self._grid.listen_as_value(self.ports[\"a\"]) b = self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(abs(b - a))", "% (mod * rate) return value == 0 or mod == 1 class", "if self._grid.peek(self.x + x, self.y + y) == glyph: return True return False", "\"G\", \"g\", \"A\", \"a\", \"B\") NOTE_TO_INDEX = {k: i for i, k in", "operator %s @ (%d, %d)\", self.name, self.x, self.y ) elif glyph is None:", "x: max(x, 1)), } ) def operation(self, frame, force=False): length = self._grid.listen_as_value(self.ports[\"len\"]) x", "y, name, description, *, glyph=DOT_GLYPH, is_passive=False ): self.x = x self.y = y", "output_port is None: logging.warn( \"No output port for operator %s @ (%d, %d)\",", "__init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"track\", \"Reads eastward", "self._grid.listen(self.ports[\"note\"]) if not NOTE_TO_INDEX: return channel = self._grid.listen_as_value(self.ports[\"channel\"]) if channel > 15: return", "self.move(-1, 0) self.is_passive = False class Jymper(IOperator): def __init__(self, grid, x, y, *,", "class Halt(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y,", "y, \"south\", \"Move southward or bang\", glyph=\"s\", is_passive=is_passive, ) self.do_draw = False def", "force=False): key = 
self._grid.listen_as_value(self.ports[\"key\"]) length = self._grid.listen_as_value(self.ports[\"len\"]) for offset in range(length): self._grid.lock(self.x +", "northward or bang\", glyph=\"n\", is_passive=is_passive, ) self.do_draw = False def operation(self, frame, force=False):", "frame, force=False): a = self._grid.listen(self.ports[\"a\"]) b = self._grid.listen(self.ports[\"b\"]) return a == b class", "self._grid.key_of(value) class Delay(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x,", "x = self._grid.listen_as_value(self.ports[\"x\"]) y = self._grid.listen_as_value(self.ports[\"y\"]) + 1 for offset in range(length): input_port", "\"val\": InputPort(x - 1, y), OUTPUT_PORT_NAME: OutputPort(x + 1, y), } ) def", "% length, self.y) return self._grid.listen(port) class West(IOperator): def __init__(self, grid, x, y, *,", "class Increment(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y,", "= False def operation(self, frame, force=False): self.move(-1, 0) self.is_passive = False class Jymper(IOperator):", "y, \"comment\", \"Halts line\", glyph=COMMENT_GLYPH, is_passive=is_passive, ) self.do_draw = False def operation(self, frame,", "is_passive=is_passive, ) self.do_draw = False def operation(self, frame, force=False): self.do_draw = False self.erase()", "y, *, is_passive=False): super().__init__( grid, x, y, \"clock\", \"Outputs modulo of frame\", glyph=\"c\",", "\"comment\", \"Halts line\", glyph=COMMENT_GLYPH, is_passive=is_passive, ) self.do_draw = False def operation(self, frame, force=False):", "\"octave\", \"note\": if self._grid.listen(self.ports[port_name]) == DOT_GLYPH: return note = self._grid.listen(self.ports[\"note\"]) if not NOTE_TO_INDEX:", "force=False): self.move(0, 1) self.is_passive = False class Track(IOperator): def __init__(self, grid, x, y,", "payload else DOT_GLYPH self._grid.poke(output_port.x, output_port.y, glyph) def _output(self, glyph, 
port=None): if port is", "if not NOTE_TO_INDEX: return channel = self._grid.listen_as_value(self.ports[\"channel\"]) if channel > 15: return octave", "\"d\", \"E\", \"F\", \"f\", \"G\", \"g\", \"A\", \"a\", \"B\") NOTE_TO_INDEX = {k: i", "grid, x, y, \"west\", \"Move westward or bang\", glyph=\"w\", is_passive=is_passive, ) self.do_draw =", "is_sensitive=True), } ) def operation(self, frame, force=False): index = self._grid.listen_as_value( self.ports[\"a\"] ) +", "1, y), OUTPUT_PORT_NAME: OutputPort(x + 1, y), } ) def operation(self, frame, force=False):", "x, y, \"delay\", \"Bangs on module of frame\", glyph=\"d\", is_passive=is_passive, ) self.ports.update( {", "DOT_GLYPH self._grid.poke(output_port.x, output_port.y, glyph) def _output(self, glyph, port=None): if port is None: output_port", "x, y, *, is_passive=False): super().__init__( grid, x, y, \"north\", \"Move northward or bang\",", "return False else: right_port = InputPort(self.x + 1, self.y) value = self._grid.listen(right_port) if", "return False else: return True def _bang(self, payload): output_port = self._output_port if output_port", "for x in range(self.x + 1, self._grid.cols): self._grid.lock(x, self.y) if self._grid.peek(x, self.y) ==", "Substract(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"substract\",", "def operation(self, frame, force=False): self.move(0, -1) self.is_passive = False class Random(IOperator): def __init__(self,", "southward or bang\", glyph=\"s\", is_passive=is_passive, ) self.do_draw = False def operation(self, frame, force=False):", "rate) % mod return self._grid.key_of(value) class Delay(IOperator): def __init__(self, grid, x, y, *,", "- 1), OUTPUT_PORT_NAME: OutputPort(x, y + 1), } ) def operation(self, frame, force=False):", ") self.ports.update( { \"val\": InputPort(x - 1, y), OUTPUT_PORT_NAME: OutputPort(x + 1, y),", "self.y) value = self._grid.listen(right_port) if value.lower() == value.upper() or value.upper() != 
value: return", "*, is_passive=False): super().__init__( grid, x, y, \"delay\", \"Bangs on module of frame\", glyph=\"d\",", "} ) def operation(self, frame, force=False): index = self._grid.listen_as_value( self.ports[\"a\"] ) + self._grid.listen_as_value(self.ports[\"b\"])", "for x, y in ((-1, 0), (1, 0), (0, -1), (0, 1)): if", "self.y) for x in range(self.x + 1, self._grid.cols): self._grid.lock(x, self.y) if self._grid.peek(x, self.y)", "self._grid.listen_as_value(self.ports[\"rate\"]) mod = self._grid.listen_as_value(self.ports[\"mod\"]) value = frame % (mod * rate) return value", "operation(self, frame, force=False): step = self._grid.listen_as_value(self.ports[\"step\"]) mod = self._grid.listen_as_value(self.ports[\"mod\"]) out = self._grid.listen_as_value(self.ports[OUTPUT_PORT_NAME]) return", "OutputPort(x, y + 1), } ) def operation(self, frame, force=False): key = self._grid.listen_as_value(self.ports[\"key\"])", "+ 1 + key % length, self.y) return self._grid.listen(port) class West(IOperator): def __init__(self,", "*, is_passive=False): super().__init__( grid, x, y, \"clock\", \"Outputs modulo of frame\", glyph=\"c\", is_passive=is_passive,", "{ \"x\": InputPort(x - 3, y), \"y\": InputPort(x - 2, y), \"len\": InputPort(x", "COMMENT_GLYPH, DOT_GLYPH, MidiNoteOnEvent from orca.ports import InputPort, OutputPort logger = logging.getLogger(__name__) OUTPUT_PORT_NAME =", "is_bang=True), } ) def operation(self, frame, force=False): a = self._grid.listen(self.ports[\"a\"]) b = self._grid.listen(self.ports[\"b\"])", "is_passive=False): super().__init__( grid, x, y, \"track\", \"Reads eastward operand\", glyph=\"t\", is_passive=is_passive, ) self.ports.update(", "class South(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y,", "class Generator(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y,", "super().__init__( grid, x, y, \"east\", \"Move eastwards or bang\", glyph=\"e\", 
is_passive=is_passive, ) self.do_draw", "y, *, is_passive=False): super().__init__( grid, x, y, \"j\", \"Outputs northward operator\", glyph=\"f\", is_passive=is_passive,", "y) self.ports.update( { f\"input{offset}\": input_port, f\"output{offset}\": output_port, } ) res = self._grid.listen(input_port) self._output(res,", "or mod == 1 class East(IOperator): def __init__(self, grid, x, y, *, is_passive=False):", ") self.ports.update( { \"min\": InputPort(x - 1, y), \"max\": InputPort(x + 1, y),", "def operation(self, frame, force=False): self.do_draw = False self.erase() class Comment(IOperator): def __init__(self, grid,", "y, *, is_passive=False): super().__init__( grid, x, y, \"bang\", \"Bangs neighboring operands\", glyph=BANG_GLYPH, is_passive=is_passive,", "def _bang(self, payload): output_port = self._output_port if output_port is None: logger.warn(\"Trying to bang,", "y, *, is_passive=False): super().__init__( grid, x, y, \"south\", \"Move southward or bang\", glyph=\"s\",", "or bang\", glyph=\"s\", is_passive=is_passive, ) self.do_draw = False def operation(self, frame, force=False): self.move(0,", "+ 1, is_sensitive=True), } ) def operation(self, frame, force=False): rate = self._grid.listen_as_value(self.ports[\"rate\"]) mod", "!= value: return False else: return True def _bang(self, payload): output_port = self._output_port", "grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"east\", \"Move eastwards or", "for the given frame and return the payload. 
This may modify the grid.", "self._grid.peek(self.x + x, self.y + y) == glyph: return True return False def", "__init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"add\", \"Output sum", "- 1, y), \"max\": InputPort(x + 1, y), OUTPUT_PORT_NAME: OutputPort(x, y + 1,", "class Multiply(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y,", ") self.ports.update( { \"key\": InputPort(x - 2, y), \"len\": InputPort(x - 1, y,", "grid, x, y, \"y\", \"Outputs westward operator\", glyph=\"y\", is_passive=is_passive, ) self.ports.update( { \"val\":", ") def operation(self, frame, force=False): low = self._grid.listen_as_value(self.ports[\"min\"]) high = self._grid.listen_as_value(self.ports[\"max\"]) value =", "x, y, *, is_passive=False): super().__init__( grid, x, y, \"track\", \"Reads eastward operand\", glyph=\"t\",", "force: return for port_name in \"channel\", \"octave\", \"note\": if self._grid.listen(self.ports[port_name]) == DOT_GLYPH: return", "False def operation(self, frame, force=False): self.do_draw = False self.erase() class Comment(IOperator): def __init__(self,", "mod = self._grid.listen_as_value(self.ports[\"mod\"]) out = self._grid.listen_as_value(self.ports[OUTPUT_PORT_NAME]) return self._grid.key_of((out + step) % (mod if", "operands\", glyph=BANG_GLYPH, is_passive=is_passive, ) self.do_draw = False def operation(self, frame, force=False): self.do_draw =", "super().__init__( grid, x, y, \"random\", \"Outputs random value\", glyph=\"r\", is_passive=is_passive, ) self.ports.update( {", "is None: output_port = self._output_port else: output_port = port if output_port is None:", "port = InputPort(self.x + 1 + key % length, self.y) return self._grid.listen(port) class", "return channel = self._grid.listen_as_value(self.ports[\"channel\"]) if channel > 15: return octave = self._grid.listen_as_value(self.ports[\"octave\"]) velocity", "y + 1, is_bang=True), } ) def operation(self, frame, 
force=False): rate = self._grid.listen_as_value(self.ports[\"rate\"])", "y = self._grid.listen_as_value(self.ports[\"y\"]) + 1 for offset in range(length): input_port = InputPort(self.x +", "y, clamp=lambda x: max(x, 1)), } ) def operation(self, frame, force=False): length =", "math import random from orca.grid import BANG_GLYPH, COMMENT_GLYPH, DOT_GLYPH, MidiNoteOnEvent from orca.ports import", "__init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"west\", \"Move westward", "glyph=\"r\", is_passive=is_passive, ) self.ports.update( { \"min\": InputPort(x - 1, y), \"max\": InputPort(x +", "\"step\": InputPort(x - 1, y, default=\"1\"), \"mod\": InputPort(x + 1, y), OUTPUT_PORT_NAME: OutputPort(x,", "\"max\": InputPort(x + 1, y), OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True), } )", "Comment(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"comment\",", "- 2, y), \"len\": InputPort(x - 1, y, clamp=lambda x: max(x, 1)), }", "step = self._grid.listen_as_value(self.ports[\"step\"]) mod = self._grid.listen_as_value(self.ports[\"mod\"]) out = self._grid.listen_as_value(self.ports[OUTPUT_PORT_NAME]) return self._grid.key_of((out + step)", "+ x + offset, self.y + y) self.ports.update( { f\"input{offset}\": input_port, f\"output{offset}\": output_port,", "def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"clock\", \"Outputs", "def operation(self, frame, force=False): a = self._grid.listen_as_value(self.ports[\"a\"]) b = self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(abs(b -", "class Clock(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y,", "} ) def operation(self, frame, force=False): a = self._grid.listen_as_value(self.ports[\"a\"]) b = self._grid.listen_as_value(self.ports[\"b\"]) return", "\"mod\": InputPort(x + 1, y, default=\"8\"), OUTPUT_PORT_NAME: OutputPort(x, y + 1, 
is_sensitive=True), }", "\"increment\", \"Increment operator southward\", glyph=\"i\", is_passive=is_passive, ) self.ports.update( { \"step\": InputPort(x - 1,", "__init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"south\", \"Move southward", "None: logging.warn( \"No output port for operator %s @ (%d, %d)\", self.name, self.x,", "_bang(self, payload): output_port = self._output_port if output_port is None: logger.warn(\"Trying to bang, but", "self._grid.listen_as_value(self.ports[\"mod\"]) out = self._grid.listen_as_value(self.ports[OUTPUT_PORT_NAME]) return self._grid.key_of((out + step) % (mod if mod >", "self.glyph = glyph.upper() if is_passive else glyph @abc.abstractmethod def operation(self, frame, force=False): \"\"\"Run", "\"Outputs modulo of frame\", glyph=\"c\", is_passive=is_passive, ) self.ports.update( { \"rate\": InputPort(x - 1,", "if collider not in (BANG_GLYPH, DOT_GLYPH): self.explode() return self.erase() self.x += offset_x self.y", "%d, %d\", self.name, self.x, self.y, port.x, port.y, ) self._grid.lock(port.x, port.y) output_port = self._output_port", "self.name, self.x, self.y ) elif glyph is None: return else: if self._should_upper_case(): value", "grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"generator\", \"Write operands with", "self._grid.key_of(a * b) class North(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__(", "operation(self, frame, force=False): index = self._grid.listen_as_value( self.ports[\"a\"] ) + self._grid.listen_as_value(self.ports[\"b\"]) return self._grid.key_of(index) class", "grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"track\", \"Reads eastward operand\",", "_output_port(self): return self.ports.get(OUTPUT_PORT_NAME) def _has_output_port(self): return OUTPUT_PORT_NAME in self.ports def _should_upper_case(self): output_port =", "*, is_passive=False): super().__init__( grid, x, y, \"north\", \"Move northward or bang\", 
glyph=\"n\", is_passive=is_passive,", "= self._grid.listen_as_value(self.ports[\"mod\"]) value = frame % (mod * rate) return value == 0", "return self._grid.key_of(abs(b - a)) class Clock(IOperator): def __init__(self, grid, x, y, *, is_passive=False):", "y, *, is_passive=False): super().__init__( grid, x, y, \"delay\", \"Bangs on module of frame\",", "else 36)) class Jumper(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid,", "OutputPort(x, y + 1), } ) def operation(self, frame, force=False): self._grid.lock(self._output_port.x, self._output_port.y) return", "\"len\": InputPort(x - 1, y, clamp=lambda x: max(x, 1)), } ) def operation(self,", "Jumper(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"j\",", "InputPort(x - 1, y), OUTPUT_PORT_NAME: OutputPort(x + 1, y), } ) def operation(self,", "is_passive=False): super().__init__( grid, x, y, \"bang\", \"Bangs neighboring operands\", glyph=BANG_GLYPH, is_passive=is_passive, ) self.do_draw", "BANG_GLYPH, COMMENT_GLYPH, DOT_GLYPH, MidiNoteOnEvent from orca.ports import InputPort, OutputPort logger = logging.getLogger(__name__) OUTPUT_PORT_NAME", "in range(length): input_port = InputPort(self.x + offset + 1, self.y) output_port = OutputPort(self.x", "y, \"clock\", \"Outputs modulo of frame\", glyph=\"c\", is_passive=is_passive, ) self.ports.update( { \"rate\": InputPort(x", "and return the payload. This may modify the grid. 
Note: the frame is", "self.y), \"velocity\": InputPort( self.x + 4, self.y, default=\"f\", clamp=lambda x: min(max(0, x), 16)", "\"delay\", \"Bangs on module of frame\", glyph=\"d\", is_passive=is_passive, ) self.ports.update( { \"rate\": InputPort(x", "frame, force=False): low = self._grid.listen_as_value(self.ports[\"min\"]) high = self._grid.listen_as_value(self.ports[\"max\"]) value = random.randint(low, high) return", "OUTPUT_PORT_NAME: OutputPort(x, y + 1, is_sensitive=True), } ) def operation(self, frame, force=False): index", "} ) def operation(self, frame, force=False): step = self._grid.listen_as_value(self.ports[\"step\"]) mod = self._grid.listen_as_value(self.ports[\"mod\"]) out", "y, clamp=lambda x: max(1, x)), OUTPUT_PORT_NAME: OutputPort(x, y + 1), } ) def", "\"Bang if inputs are equal\", glyph=\"f\", is_passive=is_passive, ) self.ports.update( { \"a\": InputPort(x -", "False def operation(self, frame, force=False): self.move(-1, 0) self.is_passive = False class Jymper(IOperator): def", "grid, x, y, \"track\", \"Reads eastward operand\", glyph=\"t\", is_passive=is_passive, ) self.ports.update( { \"key\":", "module of frame\", glyph=\"d\", is_passive=is_passive, ) self.ports.update( { \"rate\": InputPort(x - 1, y,", "operation(self, frame, force=False): \"\"\"Run the operator for the given frame and return the", "output_port.y, glyph) def _output(self, glyph, port=None): if port is None: output_port = self._output_port", "self, grid, x, y, name, description, *, glyph=DOT_GLYPH, is_passive=False ): self.x = x", "} ) def operation(self, frame, force=False): a = self._grid.listen(self.ports[\"a\"]) b = self._grid.listen(self.ports[\"b\"]) return", "self._grid.listen(right_port) if value.lower() == value.upper() or value.upper() != value: return False else: return", "y, \"west\", \"Move westward or bang\", glyph=\"w\", is_passive=is_passive, ) self.do_draw = False def", "if self._grid.listen(self.ports[port_name]) == DOT_GLYPH: return note = 
self._grid.listen(self.ports[\"note\"]) if not NOTE_TO_INDEX: return channel", "grid. Note: the frame is assumed to match the state of the grid", "1, is_sensitive=True), } ) def operation(self, frame, force=False): rate = self._grid.listen_as_value(self.ports[\"rate\"]) mod =", "range(length): self._grid.lock(self.x + offset + 1, self.y) port = InputPort(self.x + 1 +", "), \"length\": InputPort( self.x + 5, self.y, clamp=lambda x: min(max(0, x), 32) ),", "value = glyph.upper() else: value = glyph self._grid.poke(output_port.x, output_port.y, value) class Add(IOperator): def", "y, \"substract\", \"Output difference of inputs\", glyph=\"b\", is_passive=is_passive, ) self.ports.update( { \"a\": InputPort(x", "*, is_passive=False): super().__init__( grid, x, y, \"increment\", \"Increment operator southward\", glyph=\"i\", is_passive=is_passive, )", "value = frame % (mod * rate) return value == 0 or mod", "if self._grid.is_inside(self.x, self.y): self._grid.lock(self.x, self.y) @property def _output_port(self): return self.ports.get(OUTPUT_PORT_NAME) def _has_output_port(self): return", "Track(IOperator): def __init__(self, grid, x, y, *, is_passive=False): super().__init__( grid, x, y, \"track\",", "from orca.grid import BANG_GLYPH, COMMENT_GLYPH, DOT_GLYPH, MidiNoteOnEvent from orca.ports import InputPort, OutputPort logger", "not self._grid.is_inside(new_x, new_y): self.explode() return collider = self._grid.peek(new_x, new_y) if collider not in", "1, is_sensitive=True), } ) def operation(self, frame, force=False): step = self._grid.listen_as_value(self.ports[\"step\"]) mod =", "glyph=\"s\", is_passive=is_passive, ) self.do_draw = False def operation(self, frame, force=False): self.move(0, 1) self.is_passive", "glyph = BANG_GLYPH if payload else DOT_GLYPH self._grid.poke(output_port.x, output_port.y, glyph) def _output(self, glyph," ]
[ "= pd.read_csv(r\"D:\\PROGRAMS\\PYTHON\\C102\\108Data.csv\") student_df = df.loc[df[\"student_id\"]==\"TRL_123\"] print (student_df.groupby(\"level\")[\"attempt\"].mean()) fig = go.Figure(go.Bar(x = student_df.groupby(\"level\")[\"attempt\"].mean(),y=['level1','level2','level3','level4'],orientation =", "import plotly.graph_objects as go df = pd.read_csv(r\"D:\\PROGRAMS\\PYTHON\\C102\\108Data.csv\") student_df = df.loc[df[\"student_id\"]==\"TRL_123\"] print (student_df.groupby(\"level\")[\"attempt\"].mean()) fig", "pd import plotly.graph_objects as go df = pd.read_csv(r\"D:\\PROGRAMS\\PYTHON\\C102\\108Data.csv\") student_df = df.loc[df[\"student_id\"]==\"TRL_123\"] print (student_df.groupby(\"level\")[\"attempt\"].mean())", "pd.read_csv(r\"D:\\PROGRAMS\\PYTHON\\C102\\108Data.csv\") student_df = df.loc[df[\"student_id\"]==\"TRL_123\"] print (student_df.groupby(\"level\")[\"attempt\"].mean()) fig = go.Figure(go.Bar(x = student_df.groupby(\"level\")[\"attempt\"].mean(),y=['level1','level2','level3','level4'],orientation = 'h'", "plotly.graph_objects as go df = pd.read_csv(r\"D:\\PROGRAMS\\PYTHON\\C102\\108Data.csv\") student_df = df.loc[df[\"student_id\"]==\"TRL_123\"] print (student_df.groupby(\"level\")[\"attempt\"].mean()) fig =", "pandas as pd import plotly.graph_objects as go df = pd.read_csv(r\"D:\\PROGRAMS\\PYTHON\\C102\\108Data.csv\") student_df = df.loc[df[\"student_id\"]==\"TRL_123\"]", "import pandas as pd import plotly.graph_objects as go df = pd.read_csv(r\"D:\\PROGRAMS\\PYTHON\\C102\\108Data.csv\") student_df =", "as pd import plotly.graph_objects as go df = pd.read_csv(r\"D:\\PROGRAMS\\PYTHON\\C102\\108Data.csv\") student_df = df.loc[df[\"student_id\"]==\"TRL_123\"] print", "<filename>108.py import pandas as pd import plotly.graph_objects as go df = pd.read_csv(r\"D:\\PROGRAMS\\PYTHON\\C102\\108Data.csv\") student_df", "df = pd.read_csv(r\"D:\\PROGRAMS\\PYTHON\\C102\\108Data.csv\") student_df = df.loc[df[\"student_id\"]==\"TRL_123\"] print 
(student_df.groupby(\"level\")[\"attempt\"].mean()) fig = go.Figure(go.Bar(x = student_df.groupby(\"level\")[\"attempt\"].mean(),y=['level1','level2','level3','level4'],orientation", "student_df = df.loc[df[\"student_id\"]==\"TRL_123\"] print (student_df.groupby(\"level\")[\"attempt\"].mean()) fig = go.Figure(go.Bar(x = student_df.groupby(\"level\")[\"attempt\"].mean(),y=['level1','level2','level3','level4'],orientation = 'h' ))", "as go df = pd.read_csv(r\"D:\\PROGRAMS\\PYTHON\\C102\\108Data.csv\") student_df = df.loc[df[\"student_id\"]==\"TRL_123\"] print (student_df.groupby(\"level\")[\"attempt\"].mean()) fig = go.Figure(go.Bar(x", "go df = pd.read_csv(r\"D:\\PROGRAMS\\PYTHON\\C102\\108Data.csv\") student_df = df.loc[df[\"student_id\"]==\"TRL_123\"] print (student_df.groupby(\"level\")[\"attempt\"].mean()) fig = go.Figure(go.Bar(x =", "= df.loc[df[\"student_id\"]==\"TRL_123\"] print (student_df.groupby(\"level\")[\"attempt\"].mean()) fig = go.Figure(go.Bar(x = student_df.groupby(\"level\")[\"attempt\"].mean(),y=['level1','level2','level3','level4'],orientation = 'h' )) fig.show()" ]
[ "timer: time.sleep(0.02) with timer: time.sleep(0.02) with timer: time.sleep(0.02) with timer: time.sleep(0.02) self.assertEqual(len(timer.history), 5)", "2, # 'b': 'foo', # 'z': {}, # }) # # def test_it_captures_locals_from_decorated_function(self):", "None)) def it_returns_argspec_of_class_constructor(self): class Foo(object): def __init__(self, f): pass self.assertEqual(getargspec(Foo), (('f',), None, None,", "# context = fn() # del context['sys'] # del context['_________describe_exception'] # self.assertEqual(context, {", "= wraps(lambda a: 0) self.assertEqual(getargspec(fn), (('a',), None, None, None)) def it_returns_argspec_of_wrapped_function_with_CallOnce(self): fn =", "= cake class ABC(object): def __init__(self, cake, bar): self.cake = cake self.bar() class", "[ # 'it_should_read_submethods', # 'before_each', # 'before_all', # 'after_each', # 'after_all', # 'it_should_capture_this_method',", "def d(name): # @with_metadata # def decorator(fn): # @wraps(fn) # def wrapper(*args, **kwargs):", "= tb.tb_next.tb_next.tb_next target.tb_frame.f_globals.__contains__.return_value = True format_exception.return_value = 'foo' self.assertEqual(filter_traceback(error, tb), \"foo\") format_exception.assert_called_once_with(Exception, error,", "def it_returns_argspec_of_wrapped_function_with_CallOnce(self): fn = CallOnce(lambda a: 0) self.assertEqual(getargspec(fn), (('a',), None, None, None)) class", "= CallOnce(None) self.assertEqual(subject, CallOnce(None)) def test_its_equal_with_like_function(self): subject = CallOnce(lambda:0) self.assertEqual(subject, CallOnce(lambda:0)) def test_its_truthiness_if_wrapped_is_callable(self):", "sub_const(a): return a - 2 self.assertTrue(func_equal(add, new_add)) self.assertFalse(func_equal(new_add, sub)) self.assertFalse(func_equal(sub, sub_const)) @patch('describe.spec.utils.get_true_function') def", "test_it_returns_False_otherwise(self): class Foobar(object): def foo(self): pass def foo(): pass 
self.assertFalse(accepts_arg(Foobar().foo)) self.assertFalse(accepts_arg(foo)) def test_it_returns_False_when_non_function(self):", "a, b): return a + b class FooBar(object): def __call__(self, a, b): return", "test_it_compares_class_constructors(self, getfn): getfn.side_effect = lambda o: (o.__init__, None) class Foo(object): def __init__(self, bar):", "context['_________describe_exception'] # self.assertEqual(context, { # 'a': 'foo', # 'b': 3, # 'name': 'lol',", "# fn = returns_locals(foo) # context = fn() # del context['sys'] # del", "class DescribeCallOnce(TestCase): def test_it_can_call_wrapped_fn_once(self): m = Mock() subject = CallOnce(m) subject() subject() subject()", "m = Mock() subject = CallOnce(m) subject() subject() subject() m.assert_called_once_with() def test_it_does_nothing_for_wrapping_None(self): subject", "self.assertEqual(subject.__doc__, 'my fn doc') self.assertEqual(subject.__name__, 'my_func') self.assertEqual(subject.__module__, 'super.awesome.module') self.assertEqual(subject.func_name, 'my_fn') self.assertEqual(subject.func_code, m.func_code) class", "import (tabulate, Benchmark, CallOnce, getargspec, func_equal, accepts_arg, filter_traceback) class DescribeFilteredTraceback(TestCase): @patch('traceback.format_exception') def it_should_stop_emitting_when_marker_is_found(self,", "a + b class FooBar(object): def __call__(self, a, b): return a + self.b", "(('f',), None, None, None)) def it_returns_argspec_of_wrapped_function(self): fn = wraps(lambda a: 0) self.assertEqual(getargspec(fn), (('a',),", "func() # del context['sys'] # del context['_________describe_exception'] # self.assertEqual(context, { # 'a': 'foo',", "'sample_func', # ] # self.assertEqual(set(context.keys()), set(methods)) # def test_reraises_any_exceptions_thrown(self): # def describe_spec(): #", "a = 2 # b = 'foo' # z = {} # #", "mock import Mock, patch from describe.spec.utils import (tabulate, Benchmark, CallOnce, getargspec, func_equal, accepts_arg,", "# 'before_all', 
# 'after_each', # 'after_all', # 'it_should_capture_this_method', # 'sample_func', # ] #", "# # @d('lol') # def foo(name): # a = 'foo' # b =", "3 # # func = returns_locals(foo) # context = func() # del context['sys']", "func = returns_locals(foo) # context = func() # del context['sys'] # del context['_________describe_exception']", "@patch('traceback.format_exception') def it_should_stop_emitting_when_marker_is_found(self, format_exception): error = MagicMock(spec=Exception) tb = Mock(spec=TracebackType) tb.__contains__.return_value = False", "# def describe_spec(): # @does_not_exist # def it_should_do_stuff(): pass # with self.assertRaises(NameError): #", "TestCase from StringIO import StringIO from functools import wraps from mock import Mock,", "self.assertEqual(context, { # 'a': 2, # 'b': 'foo', # 'z': {}, # })", "}) # # def test_it_captures_locals_from_decorated_function(self): # def d(name): # @with_metadata # def decorator(fn):", "= lambda o: (o.__call__, None) class Foo(object): def __call__(self, a, b): return a", "# del context['sys'] # del context['_________describe_exception'] # self.assertEqual(context, { # 'a': 2, #", "class Cake(object): def __call__(self, c, b): return c + b self.assertFalse(func_equal(Foo(), FooBar())) self.assertTrue(func_equal(Foo(),", "def it_should_read_submethods(): pass # def before_each(): pass # def before_all(): pass # def", "target.tb_frame.f_globals.__contains__.return_value = True format_exception.return_value = 'foo' self.assertEqual(filter_traceback(error, tb), \"foo\") format_exception.assert_called_once_with(Exception, error, target) def", "# b = 3 # # func = returns_locals(foo) # context = func()", "test_it_compares_lambda_function_equality(self): self.assertTrue(func_equal(lambda:0, lambda:0)) self.assertTrue(func_equal(lambda:1, lambda:1)) self.assertFalse(func_equal(lambda:2, lambda:1)) def test_it_compares_function_equality(self): def add(a, b): return", "= 'foo' # z = {} # # fn = returns_locals(foo) # 
context", "# return wrapper # return decorator # # @d('lol') # def foo(name): #", "' foo\\n bar') def test_tabulation_does_not_insert_spaces_between_double_newlines(self): self.assertEqual(tabulate('\\n\\nfoo'), '\\n\\n foo') def test_tabulation_ignores_first_line(self): self.assertEqual(tabulate('foo\\nbar', ignore_first=True), 'foo\\n", "foo(): # a = 2 # b = 'foo' # z = {}", "(o.__init__, None) class Foo(object): def __init__(self, bar): self.bar = bar class FooBar(object): def", "= lambda a: 0 self.assertEqual(getargspec(fn), (('a',), None, None, None)) def it_returns_argspec_of_class_constructor(self): class Foo(object):", "def after_all(): pass # def it_should_capture_this_method(): pass # context = locals_from_function(describe_spec) # methods", "'foo' # z = {} # # fn = returns_locals(foo) # context =", "# lol = True # def it_should_read_submethods(): pass # def before_each(): pass #", "self.assertEqual(tabulate('\\n\\nfoo'), '\\n\\n foo') def test_tabulation_ignores_first_line(self): self.assertEqual(tabulate('foo\\nbar', ignore_first=True), 'foo\\n bar') def test_tabulation_by_times(self): self.assertEqual(tabulate('\\n\\nfoo', times=2),", "before_all(): pass # def sample_func(): pass # def after_each(): pass # def after_all():", "# 'sample_func', # ] # self.assertEqual(set(context.keys()), set(methods)) # def test_reraises_any_exceptions_thrown(self): # def describe_spec():", "self.assertEqual(subject.__name__, 'my_func') self.assertEqual(subject.__module__, 'super.awesome.module') self.assertEqual(subject.func_name, 'my_fn') self.assertEqual(subject.func_code, m.func_code) class TestFuncEqual(TestCase): def test_it_compares_lambda_function_equality(self): self.assertTrue(func_equal(lambda:0,", "self.assertEqual(filter_traceback(error, tb), \"foo\") format_exception.assert_called_once_with(Exception, error, target) def it_should_return_traceback_if_its_not_a_traceback_type(self): tb = 'bar' self.assertEqual(filter_traceback(Mock(), tb),", "foo(self): pass 
def foo(): pass self.assertFalse(accepts_arg(Foobar().foo)) self.assertFalse(accepts_arg(foo)) def test_it_returns_False_when_non_function(self): self.assertFalse(accepts_arg(None)) class DescribeIntegrationGetArgSpec(TestCase): def", "None, None, None)) def it_returns_argspec_of_class_call_magicmethod(self): class Foo(object): def __call__(self, f): pass self.assertEqual(getargspec(Foo), (('f',),", "class FooBar(object): def __init__(self, cake): self.bar = cake class ABC(object): def __init__(self, cake,", "def test_it_compares_callables(self, getfn): getfn.side_effect = lambda o: (o.__call__, None) class Foo(object): def __call__(self,", "def __init__(self, cake): self.bar = cake class ABC(object): def __init__(self, cake, bar): self.cake", "ABC)) self.assertFalse(func_equal(Foo, Cake)) @patch('describe.spec.utils.get_true_function') def test_it_compares_callables(self, getfn): getfn.side_effect = lambda o: (o.__call__, None)", "+ b def sub(a, b): return a - b def sub_const(a): return a", "None) class Foo(object): def __call__(self, a, b): return a + b class FooBar(object):", "True format_exception.return_value = 'foo' self.assertEqual(filter_traceback(error, tb), \"foo\") format_exception.assert_called_once_with(Exception, error, target) def it_should_return_traceback_if_its_not_a_traceback_type(self): tb", "'a': 'foo', # 'b': 3, # 'name': 'lol', # }) class DescribeAcceptArgs(TestCase): def", "from StringIO import StringIO from functools import wraps from mock import Mock, patch", "def test_extracts_local_functions_with_invocation(self): # def describe_spec(): # lol = True # def it_should_read_submethods(): pass", "self.cake = cake self.bar() class Cake(object): def __init__(self, roflcopter, empty): self.bake = roflcopter", "return a + b class FooBar(object): def __call__(self, a, b): return a +", "import StringIO from functools import wraps from mock import Mock, patch from describe.spec.utils", "# # func = returns_locals(foo) # context = func() # del 
context['sys'] #", "FooBar(object): def __call__(self, a, b): return a + self.b class Cake(object): def __call__(self,", "@patch('describe.spec.utils.get_true_function') def test_it_compares_callables(self, getfn): getfn.side_effect = lambda o: (o.__call__, None) class Foo(object): def", "subject() def test_its_equal_with_None(self): subject = CallOnce(None) self.assertEqual(subject, CallOnce(None)) def test_its_equal_with_like_function(self): subject = CallOnce(lambda:0)", "@does_not_exist # def it_should_do_stuff(): pass # with self.assertRaises(NameError): # locals_from_function(describe_spec) class TestBenchmark(TestCase): def", "lambda:1)) def test_it_compares_function_equality(self): def add(a, b): return a + b def new_add(a, b):", "2 self.assertTrue(func_equal(add, new_add)) self.assertFalse(func_equal(new_add, sub)) self.assertFalse(func_equal(sub, sub_const)) @patch('describe.spec.utils.get_true_function') def test_it_compares_class_constructors(self, getfn): getfn.side_effect =", "m.func_code) class TestFuncEqual(TestCase): def test_it_compares_lambda_function_equality(self): self.assertTrue(func_equal(lambda:0, lambda:0)) self.assertTrue(func_equal(lambda:1, lambda:1)) self.assertFalse(func_equal(lambda:2, lambda:1)) def test_it_compares_function_equality(self):", "+ b class FooBar(object): def __call__(self, a, b): return a + self.b class", "pass # with self.assertRaises(NameError): # locals_from_function(describe_spec) class TestBenchmark(TestCase): def test_benchmark(self): import time timer", "def test_it_returns_False_otherwise(self): class Foobar(object): def foo(self): pass def foo(): pass self.assertFalse(accepts_arg(Foobar().foo)) self.assertFalse(accepts_arg(foo)) def", "self.assertRaises(NameError): # locals_from_function(describe_spec) class TestBenchmark(TestCase): def test_benchmark(self): import time timer = Benchmark() with", "{} # # fn = returns_locals(foo) # context = fn() # del context['sys']", "f): pass 
self.assertEqual(getargspec(Foo), (('f',), None, None, None)) def it_returns_argspec_of_class_call_magicmethod(self): class Foo(object): def __call__(self,", "bar') def test_tabulation_does_not_insert_spaces_between_double_newlines(self): self.assertEqual(tabulate('\\n\\nfoo'), '\\n\\n foo') def test_tabulation_ignores_first_line(self): self.assertEqual(tabulate('foo\\nbar', ignore_first=True), 'foo\\n bar') def", "def test_it_preserves_function_attributes(self): m = Mock() m.__doc__ = 'my fn doc' m.__name__ = 'my_func'", "None, None)) def it_returns_argspec_of_class_call_magicmethod(self): class Foo(object): def __call__(self, f): pass self.assertEqual(getargspec(Foo), (('f',), None,", "import time timer = Benchmark() with timer: time.sleep(0.02) with timer: time.sleep(0.02) with timer:", "StringIO import StringIO from functools import wraps from mock import Mock, patch from", "timer = Benchmark() with timer: time.sleep(0.02) with timer: time.sleep(0.02) with timer: time.sleep(0.02) with", "def test_tabulation_does_not_insert_spaces_between_double_newlines(self): self.assertEqual(tabulate('\\n\\nfoo'), '\\n\\n foo') def test_tabulation_ignores_first_line(self): self.assertEqual(tabulate('foo\\nbar', ignore_first=True), 'foo\\n bar') def test_tabulation_by_times(self):", "# 'after_all', # 'it_should_capture_this_method', # 'sample_func', # ] # self.assertEqual(set(context.keys()), set(methods)) # def", "b): return a + b class FooBar(object): def __call__(self, a, b): return a", "def test_its_equal_with_None(self): subject = CallOnce(None) self.assertEqual(subject, CallOnce(None)) def test_its_equal_with_like_function(self): subject = CallOnce(lambda:0) self.assertEqual(subject,", "pass self.assertTrue(accepts_arg(Foobar().foo)) def test_it_returns_False_otherwise(self): class Foobar(object): def foo(self): pass def foo(): pass self.assertFalse(accepts_arg(Foobar().foo))", "__init__(self, f): pass self.assertEqual(getargspec(Foo), (('f',), None, None, None)) def 
it_returns_argspec_of_class_call_magicmethod(self): class Foo(object): def", "def foo(): # a = 2 # b = 'foo' # z =", "context['sys'] # del context['_________describe_exception'] # self.assertEqual(context, { # 'a': 'foo', # 'b': 3,", "'lol', # }) class DescribeAcceptArgs(TestCase): def test_it_returns_True_for_function_with_one_arg(self): def foo(a): pass self.assertTrue(accepts_arg(foo)) def test_it_returns_True_for_class_method_with_one_arg(self):", "# def test_it_captures_locals_from_decorated_function(self): # def d(name): # @with_metadata # def decorator(fn): # @wraps(fn)", "# }) class DescribeAcceptArgs(TestCase): def test_it_returns_True_for_function_with_one_arg(self): def foo(a): pass self.assertTrue(accepts_arg(foo)) def test_it_returns_True_for_class_method_with_one_arg(self): class", "class Foobar(object): def foo(self, a): pass self.assertTrue(accepts_arg(Foobar().foo)) def test_it_returns_False_otherwise(self): class Foobar(object): def foo(self):", "def test_tabulation_ignores_first_line(self): self.assertEqual(tabulate('foo\\nbar', ignore_first=True), 'foo\\n bar') def test_tabulation_by_times(self): self.assertEqual(tabulate('\\n\\nfoo', times=2), '\\n\\n foo') def", "pass self.assertEqual(getargspec(Foo), (('f',), None, None, None)) def it_returns_argspec_of_class_call_magicmethod(self): class Foo(object): def __call__(self, f):", "accepts_arg, filter_traceback) class DescribeFilteredTraceback(TestCase): @patch('traceback.format_exception') def it_should_stop_emitting_when_marker_is_found(self, format_exception): error = MagicMock(spec=Exception) tb =", "return decorator # # @d('lol') # def foo(name): # a = 'foo' #", "0) self.assertEqual(getargspec(fn), (('a',), None, None, None)) class DescribeCallOnce(TestCase): def test_it_can_call_wrapped_fn_once(self): m = Mock()", "a - 2 self.assertTrue(func_equal(add, new_add)) self.assertFalse(func_equal(new_add, sub)) self.assertFalse(func_equal(sub, sub_const)) 
@patch('describe.spec.utils.get_true_function') def test_it_compares_class_constructors(self, getfn):", "# @wraps(fn) # def wrapper(*args, **kwargs): # return fn(name, *args, **kwargs) # return", "= {} # # fn = returns_locals(foo) # context = fn() # del", "def test_it_returns_True_for_class_method_with_one_arg(self): class Foobar(object): def foo(self, a): pass self.assertTrue(accepts_arg(Foobar().foo)) def test_it_returns_False_otherwise(self): class Foobar(object):", "# }) # # def test_it_captures_locals_from_decorated_function(self): # def d(name): # @with_metadata # def", "def test_it_can_call_wrapped_fn_once(self): m = Mock() subject = CallOnce(m) subject() subject() subject() m.assert_called_once_with() def", "context = func() # del context['sys'] # del context['_________describe_exception'] # self.assertEqual(context, { #", "# locals_from_function(describe_spec) class TestBenchmark(TestCase): def test_benchmark(self): import time timer = Benchmark() with timer:", "StringIO from functools import wraps from mock import Mock, patch from describe.spec.utils import", "bar class FooBar(object): def __init__(self, cake): self.bar = cake class ABC(object): def __init__(self,", "with timer: time.sleep(0.02) with timer: time.sleep(0.02) with timer: time.sleep(0.02) self.assertEqual(len(timer.history), 5) self.assertTrue(timer.total_time >", "tb = 'bar' self.assertEqual(filter_traceback(Mock(), tb), \"bar\") #class DescribeFnReturnsLocals(TestCase): # def test_it_captures_locals_from_function(self): # def", "'it_should_read_submethods', # 'before_each', # 'before_all', # 'after_each', # 'after_all', # 'it_should_capture_this_method', # 'sample_func',", "subject = CallOnce(object()) self.assertFalse(bool(subject)) def test_its_truthiness_if_wrapped_is_callable(self): subject = CallOnce(lambda:0) self.assertTrue(bool(subject)) def test_it_preserves_function_attributes(self): m", "class DescribeFilteredTraceback(TestCase): @patch('traceback.format_exception') def 
it_should_stop_emitting_when_marker_is_found(self, format_exception): error = MagicMock(spec=Exception) tb = Mock(spec=TracebackType) tb.__contains__.return_value", "}) class DescribeAcceptArgs(TestCase): def test_it_returns_True_for_function_with_one_arg(self): def foo(a): pass self.assertTrue(accepts_arg(foo)) def test_it_returns_True_for_class_method_with_one_arg(self): class Foobar(object):", "= True # def it_should_read_submethods(): pass # def before_each(): pass # def before_all():", "'my_func') self.assertEqual(subject.__module__, 'super.awesome.module') self.assertEqual(subject.func_name, 'my_fn') self.assertEqual(subject.func_code, m.func_code) class TestFuncEqual(TestCase): def test_it_compares_lambda_function_equality(self): self.assertTrue(func_equal(lambda:0, lambda:0))", "(tabulate, Benchmark, CallOnce, getargspec, func_equal, accepts_arg, filter_traceback) class DescribeFilteredTraceback(TestCase): @patch('traceback.format_exception') def it_should_stop_emitting_when_marker_is_found(self, format_exception):", "= 'foo' self.assertEqual(filter_traceback(error, tb), \"foo\") format_exception.assert_called_once_with(Exception, error, target) def it_should_return_traceback_if_its_not_a_traceback_type(self): tb = 'bar'", "new_add(a, b): return a + b def sub(a, b): return a - b", "# 'a': 2, # 'b': 'foo', # 'z': {}, # }) # #", "describe_spec(): # lol = True # def it_should_read_submethods(): pass # def before_each(): pass", "'after_each', # 'after_all', # 'it_should_capture_this_method', # 'sample_func', # ] # self.assertEqual(set(context.keys()), set(methods)) #", "# a = 'foo' # b = 3 # # func = returns_locals(foo)", "functools import wraps from mock import Mock, patch from describe.spec.utils import (tabulate, Benchmark,", "tb), \"bar\") #class DescribeFnReturnsLocals(TestCase): # def test_it_captures_locals_from_function(self): # def foo(): # a =", "@wraps(fn) # def wrapper(*args, **kwargs): # return fn(name, *args, **kwargs) # return wrapper", "# with 
self.assertRaises(NameError): # locals_from_function(describe_spec) class TestBenchmark(TestCase): def test_benchmark(self): import time timer =", "# 'after_each', # 'after_all', # 'it_should_capture_this_method', # 'sample_func', # ] # self.assertEqual(set(context.keys()), set(methods))", "> 0.09) def test_benchmark_multiple(self): import time timer = Benchmark() with timer: time.sleep(0.02) with", "class Foo(object): def __init__(self, bar): self.bar = bar class FooBar(object): def __init__(self, cake):", "return a + self.b class Cake(object): def __call__(self, c, b): return c +", "subject = CallOnce(lambda:0) self.assertTrue(bool(subject)) def test_it_preserves_function_attributes(self): m = Mock() m.__doc__ = 'my fn", "# return fn(name, *args, **kwargs) # return wrapper # return decorator # #", "'bar' self.assertEqual(filter_traceback(Mock(), tb), \"bar\") #class DescribeFnReturnsLocals(TestCase): # def test_it_captures_locals_from_function(self): # def foo(): #", "# def decorator(fn): # @wraps(fn) # def wrapper(*args, **kwargs): # return fn(name, *args,", "Cake(object): def __init__(self, roflcopter, empty): self.bake = roflcopter self.assertTrue(func_equal(Foo, FooBar)) print 'equal' self.assertFalse(func_equal(Foo,", "timer: time.sleep(0.02) with timer: time.sleep(0.02) with timer: time.sleep(0.02) self.assertEqual(len(timer.history), 5) self.assertTrue(timer.total_time > 0.09)", "foo(a): pass self.assertTrue(accepts_arg(foo)) def test_it_returns_True_for_class_method_with_one_arg(self): class Foobar(object): def foo(self, a): pass self.assertTrue(accepts_arg(Foobar().foo)) def", "# def foo(name): # a = 'foo' # b = 3 # #", "returns_locals(foo) # context = func() # del context['sys'] # del context['_________describe_exception'] # self.assertEqual(context,", "decorator(fn): # @wraps(fn) # def wrapper(*args, **kwargs): # return fn(name, *args, **kwargs) #", "ignore_first=True), 'foo\\n bar') def test_tabulation_by_times(self): 
self.assertEqual(tabulate('\\n\\nfoo', times=2), '\\n\\n foo') def test_tabulation_by_zero_times(self): self.assertEqual(tabulate('\\n\\nfoo', times=0),", "Foo(object): def __call__(self, a, b): return a + b class FooBar(object): def __call__(self,", "'foo', # 'b': 3, # 'name': 'lol', # }) class DescribeAcceptArgs(TestCase): def test_it_returns_True_for_function_with_one_arg(self):", "def test_it_does_nothing_for_wrapping_None(self): subject = CallOnce(None) subject() subject() def test_its_equal_with_None(self): subject = CallOnce(None) self.assertEqual(subject,", "3, # 'name': 'lol', # }) class DescribeAcceptArgs(TestCase): def test_it_returns_True_for_function_with_one_arg(self): def foo(a): pass", "tb.__contains__.return_value = False target = tb.tb_next.tb_next.tb_next target.tb_frame.f_globals.__contains__.return_value = True format_exception.return_value = 'foo' self.assertEqual(filter_traceback(error,", "time timer = Benchmark() with timer: time.sleep(0.02) with timer: time.sleep(0.02) with timer: time.sleep(0.02)", "# def before_each(): pass # def before_all(): pass # def sample_func(): pass #", "def test_it_returns_False_when_non_function(self): self.assertFalse(accepts_arg(None)) class DescribeIntegrationGetArgSpec(TestCase): def it_returns_argspec_of_functions(self): fn = lambda a: 0 self.assertEqual(getargspec(fn),", "class Foo(object): def __init__(self, f): pass self.assertEqual(getargspec(Foo), (('f',), None, None, None)) def it_returns_argspec_of_class_call_magicmethod(self):", "describe_spec(): # @does_not_exist # def it_should_do_stuff(): pass # with self.assertRaises(NameError): # locals_from_function(describe_spec) class", "self.bar = bar class FooBar(object): def __init__(self, cake): self.bar = cake class ABC(object):", "def describe_spec(): # lol = True # def it_should_read_submethods(): pass # def before_each():", "def it_returns_argspec_of_functions(self): fn = lambda a: 0 self.assertEqual(getargspec(fn), (('a',), None, None, None)) def", 
"test_it_compares_function_equality(self): def add(a, b): return a + b def new_add(a, b): return a", "'foo\\n bar') def test_tabulation_by_times(self): self.assertEqual(tabulate('\\n\\nfoo', times=2), '\\n\\n foo') def test_tabulation_by_zero_times(self): self.assertEqual(tabulate('\\n\\nfoo', times=0), '\\n\\nfoo')", "it_should_do_stuff(): pass # with self.assertRaises(NameError): # locals_from_function(describe_spec) class TestBenchmark(TestCase): def test_benchmark(self): import time", "= returns_locals(foo) # context = fn() # del context['sys'] # del context['_________describe_exception'] #", "test_it_returns_False_when_non_function(self): self.assertFalse(accepts_arg(None)) class DescribeIntegrationGetArgSpec(TestCase): def it_returns_argspec_of_functions(self): fn = lambda a: 0 self.assertEqual(getargspec(fn), (('a',),", "unittest import TestCase from StringIO import StringIO from functools import wraps from mock", "# context = func() # del context['sys'] # del context['_________describe_exception'] # self.assertEqual(context, {", "bar): self.bar = bar class FooBar(object): def __init__(self, cake): self.bar = cake class", "error, target) def it_should_return_traceback_if_its_not_a_traceback_type(self): tb = 'bar' self.assertEqual(filter_traceback(Mock(), tb), \"bar\") #class DescribeFnReturnsLocals(TestCase): #", "lol = True # def it_should_read_submethods(): pass # def before_each(): pass # def", "'b': 'foo', # 'z': {}, # }) # # def test_it_captures_locals_from_decorated_function(self): # def", "cake, bar): self.cake = cake self.bar() class Cake(object): def __init__(self, roflcopter, empty): self.bake", "{ # 'a': 2, # 'b': 'foo', # 'z': {}, # }) #", "= locals_from_function(describe_spec) # methods = [ # 'it_should_read_submethods', # 'before_each', # 'before_all', #", "test_tabulation_ignores_first_line(self): self.assertEqual(tabulate('foo\\nbar', ignore_first=True), 'foo\\n bar') def test_tabulation_by_times(self): self.assertEqual(tabulate('\\n\\nfoo', 
times=2), '\\n\\n foo') def test_tabulation_by_zero_times(self):", "- 2 self.assertTrue(func_equal(add, new_add)) self.assertFalse(func_equal(new_add, sub)) self.assertFalse(func_equal(sub, sub_const)) @patch('describe.spec.utils.get_true_function') def test_it_compares_class_constructors(self, getfn): getfn.side_effect", "DescribeFnReturnsLocals(TestCase): # def test_it_captures_locals_from_function(self): # def foo(): # a = 2 # b", "sub(a, b): return a - b def sub_const(a): return a - 2 self.assertTrue(func_equal(add,", "it_should_return_traceback_if_its_not_a_traceback_type(self): tb = 'bar' self.assertEqual(filter_traceback(Mock(), tb), \"bar\") #class DescribeFnReturnsLocals(TestCase): # def test_it_captures_locals_from_function(self): #", "wrapper(*args, **kwargs): # return fn(name, *args, **kwargs) # return wrapper # return decorator", "pass # def before_all(): pass # def sample_func(): pass # def after_each(): pass", "TestTabulate(TestCase): def test_tabulation_of_string(self): self.assertEqual(tabulate('foo\\nbar'), ' foo\\n bar') def test_tabulation_does_not_insert_spaces_between_double_newlines(self): self.assertEqual(tabulate('\\n\\nfoo'), '\\n\\n foo') def", "self.assertTrue(func_equal(Foo(), Cake())) class TestTabulate(TestCase): def test_tabulation_of_string(self): self.assertEqual(tabulate('foo\\nbar'), ' foo\\n bar') def test_tabulation_does_not_insert_spaces_between_double_newlines(self): self.assertEqual(tabulate('\\n\\nfoo'),", "b = 3 # # func = returns_locals(foo) # context = func() #", "test_tabulation_by_times(self): self.assertEqual(tabulate('\\n\\nfoo', times=2), '\\n\\n foo') def test_tabulation_by_zero_times(self): self.assertEqual(tabulate('\\n\\nfoo', times=0), '\\n\\nfoo') # class TestLocalsFromFunction(TestCase):", "{ # 'a': 'foo', # 'b': 3, # 'name': 'lol', # }) class", "decorator # # @d('lol') # def foo(name): # a = 'foo' # b", "# def test_it_captures_locals_from_function(self): # def foo(): # a = 2 # b =", "# 'z': {}, # }) # # def 
test_it_captures_locals_from_decorated_function(self): # def d(name): #", "lambda a: 0 self.assertEqual(getargspec(fn), (('a',), None, None, None)) def it_returns_argspec_of_class_constructor(self): class Foo(object): def", "fn = CallOnce(lambda a: 0) self.assertEqual(getargspec(fn), (('a',), None, None, None)) class DescribeCallOnce(TestCase): def", "# 'it_should_capture_this_method', # 'sample_func', # ] # self.assertEqual(set(context.keys()), set(methods)) # def test_reraises_any_exceptions_thrown(self): #", "CallOnce(object()) self.assertFalse(bool(subject)) def test_its_truthiness_if_wrapped_is_callable(self): subject = CallOnce(lambda:0) self.assertTrue(bool(subject)) def test_it_preserves_function_attributes(self): m = Mock()", "def test_benchmark_multiple(self): import time timer = Benchmark() with timer: time.sleep(0.02) with timer: time.sleep(0.02)", "import sys from unittest import TestCase from StringIO import StringIO from functools import", "\"bar\") #class DescribeFnReturnsLocals(TestCase): # def test_it_captures_locals_from_function(self): # def foo(): # a = 2", "import TestCase from StringIO import StringIO from functools import wraps from mock import", "self.assertEqual(getargspec(fn), (('a',), None, None, None)) class DescribeCallOnce(TestCase): def test_it_can_call_wrapped_fn_once(self): m = Mock() subject", "self.assertEqual(tabulate('\\n\\nfoo', times=2), '\\n\\n foo') def test_tabulation_by_zero_times(self): self.assertEqual(tabulate('\\n\\nfoo', times=0), '\\n\\nfoo') # class TestLocalsFromFunction(TestCase): #", "a = 'foo' # b = 3 # # func = returns_locals(foo) #", "def test_it_compares_lambda_function_equality(self): self.assertTrue(func_equal(lambda:0, lambda:0)) self.assertTrue(func_equal(lambda:1, lambda:1)) self.assertFalse(func_equal(lambda:2, lambda:1)) def test_it_compares_function_equality(self): def add(a, b):", "wraps(lambda a: 0) self.assertEqual(getargspec(fn), (('a',), None, None, None)) def 
it_returns_argspec_of_wrapped_function_with_CallOnce(self): fn = CallOnce(lambda", "class TestBenchmark(TestCase): def test_benchmark(self): import time timer = Benchmark() with timer: time.sleep(0.1) self.assertTrue(timer.history[-1]", "False target = tb.tb_next.tb_next.tb_next target.tb_frame.f_globals.__contains__.return_value = True format_exception.return_value = 'foo' self.assertEqual(filter_traceback(error, tb), \"foo\")", "# 'a': 'foo', # 'b': 3, # 'name': 'lol', # }) class DescribeAcceptArgs(TestCase):", "subject() m.assert_called_once_with() def test_it_does_nothing_for_wrapping_None(self): subject = CallOnce(None) subject() subject() def test_its_equal_with_None(self): subject =", "set(methods)) # def test_reraises_any_exceptions_thrown(self): # def describe_spec(): # @does_not_exist # def it_should_do_stuff(): pass", "time.sleep(0.1) self.assertTrue(timer.history[-1] > 0.09) def test_benchmark_multiple(self): import time timer = Benchmark() with timer:", "class TestFuncEqual(TestCase): def test_it_compares_lambda_function_equality(self): self.assertTrue(func_equal(lambda:0, lambda:0)) self.assertTrue(func_equal(lambda:1, lambda:1)) self.assertFalse(func_equal(lambda:2, lambda:1)) def test_it_compares_function_equality(self): def", "add(a, b): return a + b def new_add(a, b): return a + b", "lambda o: (o.__init__, None) class Foo(object): def __init__(self, bar): self.bar = bar class", "= 2 # b = 'foo' # z = {} # # fn", "del context['_________describe_exception'] # self.assertEqual(context, { # 'a': 2, # 'b': 'foo', # 'z':", "foo') def test_tabulation_by_zero_times(self): self.assertEqual(tabulate('\\n\\nfoo', times=0), '\\n\\nfoo') # class TestLocalsFromFunction(TestCase): # def test_extracts_local_functions_with_invocation(self): #", "= CallOnce(None) subject() subject() def test_its_equal_with_None(self): subject = CallOnce(None) self.assertEqual(subject, CallOnce(None)) def test_its_equal_with_like_function(self):", "__init__(self, cake): self.bar = 
cake class ABC(object): def __init__(self, cake, bar): self.cake =", "def test_tabulation_by_times(self): self.assertEqual(tabulate('\\n\\nfoo', times=2), '\\n\\n foo') def test_tabulation_by_zero_times(self): self.assertEqual(tabulate('\\n\\nfoo', times=0), '\\n\\nfoo') # class", "# def wrapper(*args, **kwargs): # return fn(name, *args, **kwargs) # return wrapper #", "def test_it_captures_locals_from_function(self): # def foo(): # a = 2 # b = 'foo'", "self.assertTrue(accepts_arg(foo)) def test_it_returns_True_for_class_method_with_one_arg(self): class Foobar(object): def foo(self, a): pass self.assertTrue(accepts_arg(Foobar().foo)) def test_it_returns_False_otherwise(self): class", "None, None, None)) def it_returns_argspec_of_wrapped_function(self): fn = wraps(lambda a: 0) self.assertEqual(getargspec(fn), (('a',), None,", "= 'super.awesome.module' m.func_name = 'my_fn' m.func_code = Mock() subject = CallOnce(m) self.assertEqual(subject.__doc__, 'my", "= CallOnce(lambda:0) self.assertTrue(bool(subject)) def test_it_preserves_function_attributes(self): m = Mock() m.__doc__ = 'my fn doc'", "# # fn = returns_locals(foo) # context = fn() # del context['sys'] #", "'super.awesome.module') self.assertEqual(subject.func_name, 'my_fn') self.assertEqual(subject.func_code, m.func_code) class TestFuncEqual(TestCase): def test_it_compares_lambda_function_equality(self): self.assertTrue(func_equal(lambda:0, lambda:0)) self.assertTrue(func_equal(lambda:1, lambda:1))", "fn = returns_locals(foo) # context = fn() # del context['sys'] # del context['_________describe_exception']", "__call__(self, c, b): return c + b self.assertFalse(func_equal(Foo(), FooBar())) self.assertTrue(func_equal(Foo(), Cake())) class TestTabulate(TestCase):", "m.assert_called_once_with() def test_it_does_nothing_for_wrapping_None(self): subject = CallOnce(None) subject() subject() def test_its_equal_with_None(self): subject = CallOnce(None)", "pass # def before_each(): pass # def before_all(): pass # def 
sample_func(): pass", "# a = 2 # b = 'foo' # z = {} #", "= cake self.bar() class Cake(object): def __init__(self, roflcopter, empty): self.bake = roflcopter self.assertTrue(func_equal(Foo,", "class FooBar(object): def __call__(self, a, b): return a + self.b class Cake(object): def", "def describe_spec(): # @does_not_exist # def it_should_do_stuff(): pass # with self.assertRaises(NameError): # locals_from_function(describe_spec)", "class TestLocalsFromFunction(TestCase): # def test_extracts_local_functions_with_invocation(self): # def describe_spec(): # lol = True #", "tb), \"foo\") format_exception.assert_called_once_with(Exception, error, target) def it_should_return_traceback_if_its_not_a_traceback_type(self): tb = 'bar' self.assertEqual(filter_traceback(Mock(), tb), \"bar\")", "a): pass self.assertTrue(accepts_arg(Foobar().foo)) def test_it_returns_False_otherwise(self): class Foobar(object): def foo(self): pass def foo(): pass", "CallOnce(None)) def test_its_equal_with_like_function(self): subject = CallOnce(lambda:0) self.assertEqual(subject, CallOnce(lambda:0)) def test_its_truthiness_if_wrapped_is_callable(self): subject = CallOnce(object())", "b def new_add(a, b): return a + b def sub(a, b): return a", "**kwargs) # return wrapper # return decorator # # @d('lol') # def foo(name):", "= func() # del context['sys'] # del context['_________describe_exception'] # self.assertEqual(context, { # 'a':", "test_it_captures_locals_from_function(self): # def foo(): # a = 2 # b = 'foo' #", "DescribeCallOnce(TestCase): def test_it_can_call_wrapped_fn_once(self): m = Mock() subject = CallOnce(m) subject() subject() subject() m.assert_called_once_with()", "bar') def test_tabulation_by_times(self): self.assertEqual(tabulate('\\n\\nfoo', times=2), '\\n\\n foo') def test_tabulation_by_zero_times(self): self.assertEqual(tabulate('\\n\\nfoo', times=0), '\\n\\nfoo') #", "with timer: time.sleep(0.1) self.assertTrue(timer.history[-1] > 0.09) def test_benchmark_multiple(self): import 
time timer = Benchmark()", "a + b def sub(a, b): return a - b def sub_const(a): return", "from mock import Mock, patch from describe.spec.utils import (tabulate, Benchmark, CallOnce, getargspec, func_equal,", "test_it_compares_callables(self, getfn): getfn.side_effect = lambda o: (o.__call__, None) class Foo(object): def __call__(self, a,", "def wrapper(*args, **kwargs): # return fn(name, *args, **kwargs) # return wrapper # return", "'before_all', # 'after_each', # 'after_all', # 'it_should_capture_this_method', # 'sample_func', # ] # self.assertEqual(set(context.keys()),", "'it_should_capture_this_method', # 'sample_func', # ] # self.assertEqual(set(context.keys()), set(methods)) # def test_reraises_any_exceptions_thrown(self): # def", "= lambda o: (o.__init__, None) class Foo(object): def __init__(self, bar): self.bar = bar", "CallOnce, getargspec, func_equal, accepts_arg, filter_traceback) class DescribeFilteredTraceback(TestCase): @patch('traceback.format_exception') def it_should_stop_emitting_when_marker_is_found(self, format_exception): error =", "# def d(name): # @with_metadata # def decorator(fn): # @wraps(fn) # def wrapper(*args,", "subject() subject() subject() m.assert_called_once_with() def test_it_does_nothing_for_wrapping_None(self): subject = CallOnce(None) subject() subject() def test_its_equal_with_None(self):", "return a - 2 self.assertTrue(func_equal(add, new_add)) self.assertFalse(func_equal(new_add, sub)) self.assertFalse(func_equal(sub, sub_const)) @patch('describe.spec.utils.get_true_function') def test_it_compares_class_constructors(self,", "sample_func(): pass # def after_each(): pass # def after_all(): pass # def it_should_capture_this_method():", "self.assertFalse(accepts_arg(Foobar().foo)) self.assertFalse(accepts_arg(foo)) def test_it_returns_False_when_non_function(self): self.assertFalse(accepts_arg(None)) class DescribeIntegrationGetArgSpec(TestCase): def it_returns_argspec_of_functions(self): fn = lambda a:", 
"self.assertEqual(subject, CallOnce(lambda:0)) def test_its_truthiness_if_wrapped_is_callable(self): subject = CallOnce(object()) self.assertFalse(bool(subject)) def test_its_truthiness_if_wrapped_is_callable(self): subject = CallOnce(lambda:0)", "@with_metadata # def decorator(fn): # @wraps(fn) # def wrapper(*args, **kwargs): # return fn(name,", "cake self.bar() class Cake(object): def __init__(self, roflcopter, empty): self.bake = roflcopter self.assertTrue(func_equal(Foo, FooBar))", "= bar class FooBar(object): def __init__(self, cake): self.bar = cake class ABC(object): def", "getfn.side_effect = lambda o: (o.__init__, None) class Foo(object): def __init__(self, bar): self.bar =", "self.assertFalse(func_equal(Foo, ABC)) self.assertFalse(func_equal(Foo, Cake)) @patch('describe.spec.utils.get_true_function') def test_it_compares_callables(self, getfn): getfn.side_effect = lambda o: (o.__call__,", "foo(): pass self.assertFalse(accepts_arg(Foobar().foo)) self.assertFalse(accepts_arg(foo)) def test_it_returns_False_when_non_function(self): self.assertFalse(accepts_arg(None)) class DescribeIntegrationGetArgSpec(TestCase): def it_returns_argspec_of_functions(self): fn =", "None, None)) class DescribeCallOnce(TestCase): def test_it_can_call_wrapped_fn_once(self): m = Mock() subject = CallOnce(m) subject()", "'my_fn' m.func_code = Mock() subject = CallOnce(m) self.assertEqual(subject.__doc__, 'my fn doc') self.assertEqual(subject.__name__, 'my_func')", "self.assertTrue(func_equal(add, new_add)) self.assertFalse(func_equal(new_add, sub)) self.assertFalse(func_equal(sub, sub_const)) @patch('describe.spec.utils.get_true_function') def test_it_compares_class_constructors(self, getfn): getfn.side_effect = lambda", "= True format_exception.return_value = 'foo' self.assertEqual(filter_traceback(error, tb), \"foo\") format_exception.assert_called_once_with(Exception, error, target) def it_should_return_traceback_if_its_not_a_traceback_type(self):", "def 
it_returns_argspec_of_class_call_magicmethod(self): class Foo(object): def __call__(self, f): pass self.assertEqual(getargspec(Foo), (('f',), None, None, None))", "# 'name': 'lol', # }) class DescribeAcceptArgs(TestCase): def test_it_returns_True_for_function_with_one_arg(self): def foo(a): pass self.assertTrue(accepts_arg(foo))", "subject() subject() m.assert_called_once_with() def test_it_does_nothing_for_wrapping_None(self): subject = CallOnce(None) subject() subject() def test_its_equal_with_None(self): subject", "locals_from_function(describe_spec) # methods = [ # 'it_should_read_submethods', # 'before_each', # 'before_all', # 'after_each',", "'my fn doc' m.__name__ = 'my_func' m.__module__ = 'super.awesome.module' m.func_name = 'my_fn' m.func_code", "context['sys'] # del context['_________describe_exception'] # self.assertEqual(context, { # 'a': 2, # 'b': 'foo',", "filter_traceback) class DescribeFilteredTraceback(TestCase): @patch('traceback.format_exception') def it_should_stop_emitting_when_marker_is_found(self, format_exception): error = MagicMock(spec=Exception) tb = Mock(spec=TracebackType)", "class Foo(object): def __call__(self, f): pass self.assertEqual(getargspec(Foo), (('f',), None, None, None)) def it_returns_argspec_of_wrapped_function(self):", "'\\n\\nfoo') # class TestLocalsFromFunction(TestCase): # def test_extracts_local_functions_with_invocation(self): # def describe_spec(): # lol =", "self.assertEqual(subject.func_name, 'my_fn') self.assertEqual(subject.func_code, m.func_code) class TestFuncEqual(TestCase): def test_it_compares_lambda_function_equality(self): self.assertTrue(func_equal(lambda:0, lambda:0)) self.assertTrue(func_equal(lambda:1, lambda:1)) self.assertFalse(func_equal(lambda:2,", "= 3 # # func = returns_locals(foo) # context = func() # del", "self.assertTrue(func_equal(lambda:0, lambda:0)) self.assertTrue(func_equal(lambda:1, lambda:1)) self.assertFalse(func_equal(lambda:2, lambda:1)) def 
test_it_compares_function_equality(self): def add(a, b): return a", "a + b def new_add(a, b): return a + b def sub(a, b):", "test_benchmark_multiple(self): import time timer = Benchmark() with timer: time.sleep(0.02) with timer: time.sleep(0.02) with", "self.assertEqual(subject, CallOnce(None)) def test_its_equal_with_like_function(self): subject = CallOnce(lambda:0) self.assertEqual(subject, CallOnce(lambda:0)) def test_its_truthiness_if_wrapped_is_callable(self): subject =", "= roflcopter self.assertTrue(func_equal(Foo, FooBar)) print 'equal' self.assertFalse(func_equal(Foo, ABC)) self.assertFalse(func_equal(Foo, Cake)) @patch('describe.spec.utils.get_true_function') def test_it_compares_callables(self,", "fn = wraps(lambda a: 0) self.assertEqual(getargspec(fn), (('a',), None, None, None)) def it_returns_argspec_of_wrapped_function_with_CallOnce(self): fn", "self.assertEqual(tabulate('foo\\nbar', ignore_first=True), 'foo\\n bar') def test_tabulation_by_times(self): self.assertEqual(tabulate('\\n\\nfoo', times=2), '\\n\\n foo') def test_tabulation_by_zero_times(self): self.assertEqual(tabulate('\\n\\nfoo',", "from describe.spec.utils import (tabulate, Benchmark, CallOnce, getargspec, func_equal, accepts_arg, filter_traceback) class DescribeFilteredTraceback(TestCase): @patch('traceback.format_exception')", "cake class ABC(object): def __init__(self, cake, bar): self.cake = cake self.bar() class Cake(object):", "def it_returns_argspec_of_wrapped_function(self): fn = wraps(lambda a: 0) self.assertEqual(getargspec(fn), (('a',), None, None, None)) def", "def add(a, b): return a + b def new_add(a, b): return a +", "test_it_returns_True_for_function_with_one_arg(self): def foo(a): pass self.assertTrue(accepts_arg(foo)) def test_it_returns_True_for_class_method_with_one_arg(self): class Foobar(object): def foo(self, a): pass", "def before_each(): pass # def before_all(): pass # def sample_func(): pass # def", "a: 0) self.assertEqual(getargspec(fn), (('a',), None, None, 
None)) class DescribeCallOnce(TestCase): def test_it_can_call_wrapped_fn_once(self): m =", "'foo' self.assertEqual(filter_traceback(error, tb), \"foo\") format_exception.assert_called_once_with(Exception, error, target) def it_should_return_traceback_if_its_not_a_traceback_type(self): tb = 'bar' self.assertEqual(filter_traceback(Mock(),", "'z': {}, # }) # # def test_it_captures_locals_from_decorated_function(self): # def d(name): # @with_metadata", "= Mock() subject = CallOnce(m) self.assertEqual(subject.__doc__, 'my fn doc') self.assertEqual(subject.__name__, 'my_func') self.assertEqual(subject.__module__, 'super.awesome.module')", "+ b self.assertFalse(func_equal(Foo(), FooBar())) self.assertTrue(func_equal(Foo(), Cake())) class TestTabulate(TestCase): def test_tabulation_of_string(self): self.assertEqual(tabulate('foo\\nbar'), ' foo\\n", "+ b def new_add(a, b): return a + b def sub(a, b): return", "# self.assertEqual(set(context.keys()), set(methods)) # def test_reraises_any_exceptions_thrown(self): # def describe_spec(): # @does_not_exist # def", "'b': 3, # 'name': 'lol', # }) class DescribeAcceptArgs(TestCase): def test_it_returns_True_for_function_with_one_arg(self): def foo(a):", "# def foo(): # a = 2 # b = 'foo' # z", "(('a',), None, None, None)) class DescribeCallOnce(TestCase): def test_it_can_call_wrapped_fn_once(self): m = Mock() subject =", "o: (o.__init__, None) class Foo(object): def __init__(self, bar): self.bar = bar class FooBar(object):", "class TestTabulate(TestCase): def test_tabulation_of_string(self): self.assertEqual(tabulate('foo\\nbar'), ' foo\\n bar') def test_tabulation_does_not_insert_spaces_between_double_newlines(self): self.assertEqual(tabulate('\\n\\nfoo'), '\\n\\n foo')", "Cake(object): def __call__(self, c, b): return c + b self.assertFalse(func_equal(Foo(), FooBar())) self.assertTrue(func_equal(Foo(), Cake()))", "self.assertEqual(getargspec(Foo), (('f',), None, None, None)) def it_returns_argspec_of_wrapped_function(self): fn = 
wraps(lambda a: 0) self.assertEqual(getargspec(fn),", "target) def it_should_return_traceback_if_its_not_a_traceback_type(self): tb = 'bar' self.assertEqual(filter_traceback(Mock(), tb), \"bar\") #class DescribeFnReturnsLocals(TestCase): # def", "= 'my_func' m.__module__ = 'super.awesome.module' m.func_name = 'my_fn' m.func_code = Mock() subject =", "__call__(self, f): pass self.assertEqual(getargspec(Foo), (('f',), None, None, None)) def it_returns_argspec_of_wrapped_function(self): fn = wraps(lambda", "doc') self.assertEqual(subject.__name__, 'my_func') self.assertEqual(subject.__module__, 'super.awesome.module') self.assertEqual(subject.func_name, 'my_fn') self.assertEqual(subject.func_code, m.func_code) class TestFuncEqual(TestCase): def test_it_compares_lambda_function_equality(self):", "b): return a + b def sub(a, b): return a - b def", "bar): self.cake = cake self.bar() class Cake(object): def __init__(self, roflcopter, empty): self.bake =", "timer: time.sleep(0.1) self.assertTrue(timer.history[-1] > 0.09) def test_benchmark_multiple(self): import time timer = Benchmark() with", "b): return a + b def new_add(a, b): return a + b def", "None, None, None)) def it_returns_argspec_of_class_constructor(self): class Foo(object): def __init__(self, f): pass self.assertEqual(getargspec(Foo), (('f',),", "getfn): getfn.side_effect = lambda o: (o.__call__, None) class Foo(object): def __call__(self, a, b):", "# self.assertEqual(context, { # 'a': 'foo', # 'b': 3, # 'name': 'lol', #", "0.09) def test_benchmark_multiple(self): import time timer = Benchmark() with timer: time.sleep(0.02) with timer:", "timer = Benchmark() with timer: time.sleep(0.1) self.assertTrue(timer.history[-1] > 0.09) def test_benchmark_multiple(self): import time", "{}, # }) # # def test_it_captures_locals_from_decorated_function(self): # def d(name): # @with_metadata #", "lambda:0)) self.assertTrue(func_equal(lambda:1, lambda:1)) self.assertFalse(func_equal(lambda:2, lambda:1)) def 
test_it_compares_function_equality(self): def add(a, b): return a +", "] # self.assertEqual(set(context.keys()), set(methods)) # def test_reraises_any_exceptions_thrown(self): # def describe_spec(): # @does_not_exist #", "= Benchmark() with timer: time.sleep(0.02) with timer: time.sleep(0.02) with timer: time.sleep(0.02) with timer:", "# 'b': 3, # 'name': 'lol', # }) class DescribeAcceptArgs(TestCase): def test_it_returns_True_for_function_with_one_arg(self): def", "times=0), '\\n\\nfoo') # class TestLocalsFromFunction(TestCase): # def test_extracts_local_functions_with_invocation(self): # def describe_spec(): # lol", "@d('lol') # def foo(name): # a = 'foo' # b = 3 #", "foo(name): # a = 'foo' # b = 3 # # func =", "target = tb.tb_next.tb_next.tb_next target.tb_frame.f_globals.__contains__.return_value = True format_exception.return_value = 'foo' self.assertEqual(filter_traceback(error, tb), \"foo\") format_exception.assert_called_once_with(Exception,", "CallOnce(None) self.assertEqual(subject, CallOnce(None)) def test_its_equal_with_like_function(self): subject = CallOnce(lambda:0) self.assertEqual(subject, CallOnce(lambda:0)) def test_its_truthiness_if_wrapped_is_callable(self): subject", "# @d('lol') # def foo(name): # a = 'foo' # b = 3", "def foo(): pass self.assertFalse(accepts_arg(Foobar().foo)) self.assertFalse(accepts_arg(foo)) def test_it_returns_False_when_non_function(self): self.assertFalse(accepts_arg(None)) class DescribeIntegrationGetArgSpec(TestCase): def it_returns_argspec_of_functions(self): fn", "d(name): # @with_metadata # def decorator(fn): # @wraps(fn) # def wrapper(*args, **kwargs): #", "pass # def it_should_capture_this_method(): pass # context = locals_from_function(describe_spec) # methods = [", "fn(name, *args, **kwargs) # return wrapper # return decorator # # @d('lol') #", "FooBar())) self.assertTrue(func_equal(Foo(), Cake())) class TestTabulate(TestCase): def test_tabulation_of_string(self): self.assertEqual(tabulate('foo\\nbar'), ' 
foo\\n bar') def test_tabulation_does_not_insert_spaces_between_double_newlines(self):", "self.assertEqual(subject.func_code, m.func_code) class TestFuncEqual(TestCase): def test_it_compares_lambda_function_equality(self): self.assertTrue(func_equal(lambda:0, lambda:0)) self.assertTrue(func_equal(lambda:1, lambda:1)) self.assertFalse(func_equal(lambda:2, lambda:1)) def", "subject() subject() def test_its_equal_with_None(self): subject = CallOnce(None) self.assertEqual(subject, CallOnce(None)) def test_its_equal_with_like_function(self): subject =", "self.bar = cake class ABC(object): def __init__(self, cake, bar): self.cake = cake self.bar()", "# def after_each(): pass # def after_all(): pass # def it_should_capture_this_method(): pass #", "del context['sys'] # del context['_________describe_exception'] # self.assertEqual(context, { # 'a': 2, # 'b':", "= [ # 'it_should_read_submethods', # 'before_each', # 'before_all', # 'after_each', # 'after_all', #", "class DescribeIntegrationGetArgSpec(TestCase): def it_returns_argspec_of_functions(self): fn = lambda a: 0 self.assertEqual(getargspec(fn), (('a',), None, None,", "o: (o.__call__, None) class Foo(object): def __call__(self, a, b): return a + b", "it_returns_argspec_of_wrapped_function_with_CallOnce(self): fn = CallOnce(lambda a: 0) self.assertEqual(getargspec(fn), (('a',), None, None, None)) class DescribeCallOnce(TestCase):", "cake): self.bar = cake class ABC(object): def __init__(self, cake, bar): self.cake = cake", "def it_should_capture_this_method(): pass # context = locals_from_function(describe_spec) # methods = [ # 'it_should_read_submethods',", "a: 0) self.assertEqual(getargspec(fn), (('a',), None, None, None)) def it_returns_argspec_of_wrapped_function_with_CallOnce(self): fn = CallOnce(lambda a:", "def test_its_equal_with_like_function(self): subject = CallOnce(lambda:0) self.assertEqual(subject, CallOnce(lambda:0)) def test_its_truthiness_if_wrapped_is_callable(self): subject = CallOnce(object()) 
self.assertFalse(bool(subject))", "self.assertTrue(bool(subject)) def test_it_preserves_function_attributes(self): m = Mock() m.__doc__ = 'my fn doc' m.__name__ =", "TestLocalsFromFunction(TestCase): # def test_extracts_local_functions_with_invocation(self): # def describe_spec(): # lol = True # def", "class Foobar(object): def foo(self): pass def foo(): pass self.assertFalse(accepts_arg(Foobar().foo)) self.assertFalse(accepts_arg(foo)) def test_it_returns_False_when_non_function(self): self.assertFalse(accepts_arg(None))", "def test_its_truthiness_if_wrapped_is_callable(self): subject = CallOnce(object()) self.assertFalse(bool(subject)) def test_its_truthiness_if_wrapped_is_callable(self): subject = CallOnce(lambda:0) self.assertTrue(bool(subject)) def", "(o.__call__, None) class Foo(object): def __call__(self, a, b): return a + b class", "a: 0 self.assertEqual(getargspec(fn), (('a',), None, None, None)) def it_returns_argspec_of_class_constructor(self): class Foo(object): def __init__(self,", "m.__doc__ = 'my fn doc' m.__name__ = 'my_func' m.__module__ = 'super.awesome.module' m.func_name =", "Cake)) @patch('describe.spec.utils.get_true_function') def test_it_compares_callables(self, getfn): getfn.side_effect = lambda o: (o.__call__, None) class Foo(object):", "2 # b = 'foo' # z = {} # # fn =", "a - b def sub_const(a): return a - 2 self.assertTrue(func_equal(add, new_add)) self.assertFalse(func_equal(new_add, sub))", "wrapper # return decorator # # @d('lol') # def foo(name): # a =", "test_it_returns_True_for_class_method_with_one_arg(self): class Foobar(object): def foo(self, a): pass self.assertTrue(accepts_arg(Foobar().foo)) def test_it_returns_False_otherwise(self): class Foobar(object): def", "'my_fn') self.assertEqual(subject.func_code, m.func_code) class TestFuncEqual(TestCase): def test_it_compares_lambda_function_equality(self): self.assertTrue(func_equal(lambda:0, lambda:0)) self.assertTrue(func_equal(lambda:1, lambda:1)) 
self.assertFalse(func_equal(lambda:2, lambda:1))", "CallOnce(lambda:0)) def test_its_truthiness_if_wrapped_is_callable(self): subject = CallOnce(object()) self.assertFalse(bool(subject)) def test_its_truthiness_if_wrapped_is_callable(self): subject = CallOnce(lambda:0) self.assertTrue(bool(subject))", "context['_________describe_exception'] # self.assertEqual(context, { # 'a': 2, # 'b': 'foo', # 'z': {},", "TestBenchmark(TestCase): def test_benchmark(self): import time timer = Benchmark() with timer: time.sleep(0.1) self.assertTrue(timer.history[-1] >", "FooBar(object): def __init__(self, cake): self.bar = cake class ABC(object): def __init__(self, cake, bar):", "Mock() subject = CallOnce(m) subject() subject() subject() m.assert_called_once_with() def test_it_does_nothing_for_wrapping_None(self): subject = CallOnce(None)", "subject = CallOnce(lambda:0) self.assertEqual(subject, CallOnce(lambda:0)) def test_its_truthiness_if_wrapped_is_callable(self): subject = CallOnce(object()) self.assertFalse(bool(subject)) def test_its_truthiness_if_wrapped_is_callable(self):", "foo') def test_tabulation_ignores_first_line(self): self.assertEqual(tabulate('foo\\nbar', ignore_first=True), 'foo\\n bar') def test_tabulation_by_times(self): self.assertEqual(tabulate('\\n\\nfoo', times=2), '\\n\\n foo')", "Mock, patch from describe.spec.utils import (tabulate, Benchmark, CallOnce, getargspec, func_equal, accepts_arg, filter_traceback) class", "'my_func' m.__module__ = 'super.awesome.module' m.func_name = 'my_fn' m.func_code = Mock() subject = CallOnce(m)", "func_equal, accepts_arg, filter_traceback) class DescribeFilteredTraceback(TestCase): @patch('traceback.format_exception') def it_should_stop_emitting_when_marker_is_found(self, format_exception): error = MagicMock(spec=Exception) tb", "def sub_const(a): return a - 2 self.assertTrue(func_equal(add, new_add)) self.assertFalse(func_equal(new_add, sub)) self.assertFalse(func_equal(sub, sub_const)) 
@patch('describe.spec.utils.get_true_function')", "CallOnce(lambda:0) self.assertEqual(subject, CallOnce(lambda:0)) def test_its_truthiness_if_wrapped_is_callable(self): subject = CallOnce(object()) self.assertFalse(bool(subject)) def test_its_truthiness_if_wrapped_is_callable(self): subject =", "def sub(a, b): return a - b def sub_const(a): return a - 2", "None, None)) def it_returns_argspec_of_wrapped_function_with_CallOnce(self): fn = CallOnce(lambda a: 0) self.assertEqual(getargspec(fn), (('a',), None, None,", "self.assertEqual(set(context.keys()), set(methods)) # def test_reraises_any_exceptions_thrown(self): # def describe_spec(): # @does_not_exist # def it_should_do_stuff():", "None, None)) def it_returns_argspec_of_wrapped_function(self): fn = wraps(lambda a: 0) self.assertEqual(getargspec(fn), (('a',), None, None,", "True # def it_should_read_submethods(): pass # def before_each(): pass # def before_all(): pass", "- b def sub_const(a): return a - 2 self.assertTrue(func_equal(add, new_add)) self.assertFalse(func_equal(new_add, sub)) self.assertFalse(func_equal(sub,", "def __call__(self, f): pass self.assertEqual(getargspec(Foo), (('f',), None, None, None)) def it_returns_argspec_of_wrapped_function(self): fn =", "def __call__(self, c, b): return c + b self.assertFalse(func_equal(Foo(), FooBar())) self.assertTrue(func_equal(Foo(), Cake())) class", "self.assertEqual(tabulate('\\n\\nfoo', times=0), '\\n\\nfoo') # class TestLocalsFromFunction(TestCase): # def test_extracts_local_functions_with_invocation(self): # def describe_spec(): #", "getargspec, func_equal, accepts_arg, filter_traceback) class DescribeFilteredTraceback(TestCase): @patch('traceback.format_exception') def it_should_stop_emitting_when_marker_is_found(self, format_exception): error = MagicMock(spec=Exception)", "= 'my_fn' m.func_code = Mock() subject = CallOnce(m) self.assertEqual(subject.__doc__, 'my fn doc') self.assertEqual(subject.__name__,", "= Benchmark() with timer: time.sleep(0.1) 
self.assertTrue(timer.history[-1] > 0.09) def test_benchmark_multiple(self): import time timer", "it_returns_argspec_of_class_call_magicmethod(self): class Foo(object): def __call__(self, f): pass self.assertEqual(getargspec(Foo), (('f',), None, None, None)) def", "with timer: time.sleep(0.02) with timer: time.sleep(0.02) with timer: time.sleep(0.02) with timer: time.sleep(0.02) self.assertEqual(len(timer.history),", "@patch('describe.spec.utils.get_true_function') def test_it_compares_class_constructors(self, getfn): getfn.side_effect = lambda o: (o.__init__, None) class Foo(object): def", "timer: time.sleep(0.02) with timer: time.sleep(0.02) with timer: time.sleep(0.02) with timer: time.sleep(0.02) with timer:", "= fn() # del context['sys'] # del context['_________describe_exception'] # self.assertEqual(context, { # 'a':", "roflcopter self.assertTrue(func_equal(Foo, FooBar)) print 'equal' self.assertFalse(func_equal(Foo, ABC)) self.assertFalse(func_equal(Foo, Cake)) @patch('describe.spec.utils.get_true_function') def test_it_compares_callables(self, getfn):", "print 'equal' self.assertFalse(func_equal(Foo, ABC)) self.assertFalse(func_equal(Foo, Cake)) @patch('describe.spec.utils.get_true_function') def test_it_compares_callables(self, getfn): getfn.side_effect = lambda", "context = fn() # del context['sys'] # del context['_________describe_exception'] # self.assertEqual(context, { #", "a, b): return a + self.b class Cake(object): def __call__(self, c, b): return", "return a + b def new_add(a, b): return a + b def sub(a,", "# def it_should_read_submethods(): pass # def before_each(): pass # def before_all(): pass #", "= MagicMock(spec=Exception) tb = Mock(spec=TracebackType) tb.__contains__.return_value = False target = tb.tb_next.tb_next.tb_next target.tb_frame.f_globals.__contains__.return_value =", "def test_benchmark(self): import time timer = Benchmark() with timer: time.sleep(0.1) self.assertTrue(timer.history[-1] > 0.09)", "# del 
context['_________describe_exception'] # self.assertEqual(context, { # 'a': 'foo', # 'b': 3, #", "(('a',), None, None, None)) def it_returns_argspec_of_wrapped_function_with_CallOnce(self): fn = CallOnce(lambda a: 0) self.assertEqual(getargspec(fn), (('a',),", "Mock() m.__doc__ = 'my fn doc' m.__name__ = 'my_func' m.__module__ = 'super.awesome.module' m.func_name", "# del context['_________describe_exception'] # self.assertEqual(context, { # 'a': 2, # 'b': 'foo', #", "# del context['sys'] # del context['_________describe_exception'] # self.assertEqual(context, { # 'a': 'foo', #", "test_it_can_call_wrapped_fn_once(self): m = Mock() subject = CallOnce(m) subject() subject() subject() m.assert_called_once_with() def test_it_does_nothing_for_wrapping_None(self):", "self.assertFalse(accepts_arg(None)) class DescribeIntegrationGetArgSpec(TestCase): def it_returns_argspec_of_functions(self): fn = lambda a: 0 self.assertEqual(getargspec(fn), (('a',), None,", "DescribeFilteredTraceback(TestCase): @patch('traceback.format_exception') def it_should_stop_emitting_when_marker_is_found(self, format_exception): error = MagicMock(spec=Exception) tb = Mock(spec=TracebackType) tb.__contains__.return_value =", "it_returns_argspec_of_class_constructor(self): class Foo(object): def __init__(self, f): pass self.assertEqual(getargspec(Foo), (('f',), None, None, None)) def", "getfn.side_effect = lambda o: (o.__call__, None) class Foo(object): def __call__(self, a, b): return", "# z = {} # # fn = returns_locals(foo) # context = fn()", "self.assertEqual(getargspec(fn), (('a',), None, None, None)) def it_returns_argspec_of_class_constructor(self): class Foo(object): def __init__(self, f): pass", "DescribeIntegrationGetArgSpec(TestCase): def it_returns_argspec_of_functions(self): fn = lambda a: 0 self.assertEqual(getargspec(fn), (('a',), None, None, None))", "def new_add(a, b): return a + b def sub(a, b): return a -", "fn doc' m.__name__ = 'my_func' m.__module__ = 'super.awesome.module' 
m.func_name = 'my_fn' m.func_code =", "time timer = Benchmark() with timer: time.sleep(0.1) self.assertTrue(timer.history[-1] > 0.09) def test_benchmark_multiple(self): import", "return fn(name, *args, **kwargs) # return wrapper # return decorator # # @d('lol')", "def foo(self, a): pass self.assertTrue(accepts_arg(Foobar().foo)) def test_it_returns_False_otherwise(self): class Foobar(object): def foo(self): pass def", "it_returns_argspec_of_functions(self): fn = lambda a: 0 self.assertEqual(getargspec(fn), (('a',), None, None, None)) def it_returns_argspec_of_class_constructor(self):", "+ self.b class Cake(object): def __call__(self, c, b): return c + b self.assertFalse(func_equal(Foo(),", "'equal' self.assertFalse(func_equal(Foo, ABC)) self.assertFalse(func_equal(Foo, Cake)) @patch('describe.spec.utils.get_true_function') def test_it_compares_callables(self, getfn): getfn.side_effect = lambda o:", "# def describe_spec(): # lol = True # def it_should_read_submethods(): pass # def", "b = 'foo' # z = {} # # fn = returns_locals(foo) #", "# 'b': 'foo', # 'z': {}, # }) # # def test_it_captures_locals_from_decorated_function(self): #", "from functools import wraps from mock import Mock, patch from describe.spec.utils import (tabulate,", "# context = locals_from_function(describe_spec) # methods = [ # 'it_should_read_submethods', # 'before_each', #", "b): return c + b self.assertFalse(func_equal(Foo(), FooBar())) self.assertTrue(func_equal(Foo(), Cake())) class TestTabulate(TestCase): def test_tabulation_of_string(self):", "m.__name__ = 'my_func' m.__module__ = 'super.awesome.module' m.func_name = 'my_fn' m.func_code = Mock() subject", "Mock() subject = CallOnce(m) self.assertEqual(subject.__doc__, 'my fn doc') self.assertEqual(subject.__name__, 'my_func') self.assertEqual(subject.__module__, 'super.awesome.module') self.assertEqual(subject.func_name,", "Foobar(object): def foo(self, a): pass self.assertTrue(accepts_arg(Foobar().foo)) def 
test_it_returns_False_otherwise(self): class Foobar(object): def foo(self): pass", "context = locals_from_function(describe_spec) # methods = [ # 'it_should_read_submethods', # 'before_each', # 'before_all',", "fn() # del context['sys'] # del context['_________describe_exception'] # self.assertEqual(context, { # 'a': 2,", "= Mock(spec=TracebackType) tb.__contains__.return_value = False target = tb.tb_next.tb_next.tb_next target.tb_frame.f_globals.__contains__.return_value = True format_exception.return_value =", "= CallOnce(object()) self.assertFalse(bool(subject)) def test_its_truthiness_if_wrapped_is_callable(self): subject = CallOnce(lambda:0) self.assertTrue(bool(subject)) def test_it_preserves_function_attributes(self): m =", "def after_each(): pass # def after_all(): pass # def it_should_capture_this_method(): pass # context", "self.assertFalse(func_equal(sub, sub_const)) @patch('describe.spec.utils.get_true_function') def test_it_compares_class_constructors(self, getfn): getfn.side_effect = lambda o: (o.__init__, None) class", "self.assertFalse(bool(subject)) def test_its_truthiness_if_wrapped_is_callable(self): subject = CallOnce(lambda:0) self.assertTrue(bool(subject)) def test_it_preserves_function_attributes(self): m = Mock() m.__doc__", "__init__(self, bar): self.bar = bar class FooBar(object): def __init__(self, cake): self.bar = cake", "new_add)) self.assertFalse(func_equal(new_add, sub)) self.assertFalse(func_equal(sub, sub_const)) @patch('describe.spec.utils.get_true_function') def test_it_compares_class_constructors(self, getfn): getfn.side_effect = lambda o:", "= CallOnce(lambda a: 0) self.assertEqual(getargspec(fn), (('a',), None, None, None)) class DescribeCallOnce(TestCase): def test_it_can_call_wrapped_fn_once(self):", "def test_it_captures_locals_from_decorated_function(self): # def d(name): # @with_metadata # def decorator(fn): # @wraps(fn) #", "Foobar(object): def foo(self): pass def foo(): pass self.assertFalse(accepts_arg(Foobar().foo)) 
self.assertFalse(accepts_arg(foo)) def test_it_returns_False_when_non_function(self): self.assertFalse(accepts_arg(None)) class", "'foo' # b = 3 # # func = returns_locals(foo) # context =", "None, None, None)) def it_returns_argspec_of_wrapped_function_with_CallOnce(self): fn = CallOnce(lambda a: 0) self.assertEqual(getargspec(fn), (('a',), None,", "# def test_reraises_any_exceptions_thrown(self): # def describe_spec(): # @does_not_exist # def it_should_do_stuff(): pass #", "CallOnce(m) subject() subject() subject() m.assert_called_once_with() def test_it_does_nothing_for_wrapping_None(self): subject = CallOnce(None) subject() subject() def", "format_exception.assert_called_once_with(Exception, error, target) def it_should_return_traceback_if_its_not_a_traceback_type(self): tb = 'bar' self.assertEqual(filter_traceback(Mock(), tb), \"bar\") #class DescribeFnReturnsLocals(TestCase):", "def test_reraises_any_exceptions_thrown(self): # def describe_spec(): # @does_not_exist # def it_should_do_stuff(): pass # with", "pass self.assertFalse(accepts_arg(Foobar().foo)) self.assertFalse(accepts_arg(foo)) def test_it_returns_False_when_non_function(self): self.assertFalse(accepts_arg(None)) class DescribeIntegrationGetArgSpec(TestCase): def it_returns_argspec_of_functions(self): fn = lambda", "# b = 'foo' # z = {} # # fn = returns_locals(foo)", "m.func_code = Mock() subject = CallOnce(m) self.assertEqual(subject.__doc__, 'my fn doc') self.assertEqual(subject.__name__, 'my_func') self.assertEqual(subject.__module__,", "pass # def sample_func(): pass # def after_each(): pass # def after_all(): pass", "# @does_not_exist # def it_should_do_stuff(): pass # with self.assertRaises(NameError): # locals_from_function(describe_spec) class TestBenchmark(TestCase):", "time.sleep(0.02) with timer: time.sleep(0.02) with timer: time.sleep(0.02) with timer: time.sleep(0.02) self.assertEqual(len(timer.history), 5) self.assertTrue(timer.total_time", "test_tabulation_by_zero_times(self): 
self.assertEqual(tabulate('\\n\\nfoo', times=0), '\\n\\nfoo') # class TestLocalsFromFunction(TestCase): # def test_extracts_local_functions_with_invocation(self): # def describe_spec():", "test_its_equal_with_like_function(self): subject = CallOnce(lambda:0) self.assertEqual(subject, CallOnce(lambda:0)) def test_its_truthiness_if_wrapped_is_callable(self): subject = CallOnce(object()) self.assertFalse(bool(subject)) def", "def foo(self): pass def foo(): pass self.assertFalse(accepts_arg(Foobar().foo)) self.assertFalse(accepts_arg(foo)) def test_it_returns_False_when_non_function(self): self.assertFalse(accepts_arg(None)) class DescribeIntegrationGetArgSpec(TestCase):", "b): return a + self.b class Cake(object): def __call__(self, c, b): return c", "Foo(object): def __call__(self, f): pass self.assertEqual(getargspec(Foo), (('f',), None, None, None)) def it_returns_argspec_of_wrapped_function(self): fn", "**kwargs): # return fn(name, *args, **kwargs) # return wrapper # return decorator #", "it_should_read_submethods(): pass # def before_each(): pass # def before_all(): pass # def sample_func():", "# def sample_func(): pass # def after_each(): pass # def after_all(): pass #", "MagicMock(spec=Exception) tb = Mock(spec=TracebackType) tb.__contains__.return_value = False target = tb.tb_next.tb_next.tb_next target.tb_frame.f_globals.__contains__.return_value = True", "# # def test_it_captures_locals_from_decorated_function(self): # def d(name): # @with_metadata # def decorator(fn): #", "error = MagicMock(spec=Exception) tb = Mock(spec=TracebackType) tb.__contains__.return_value = False target = tb.tb_next.tb_next.tb_next target.tb_frame.f_globals.__contains__.return_value", "foo(self, a): pass self.assertTrue(accepts_arg(Foobar().foo)) def test_it_returns_False_otherwise(self): class Foobar(object): def foo(self): pass def foo():", "getfn): getfn.side_effect = lambda o: (o.__init__, None) class Foo(object): def __init__(self, bar): self.bar", "= CallOnce(m) subject() 
subject() subject() m.assert_called_once_with() def test_it_does_nothing_for_wrapping_None(self): subject = CallOnce(None) subject() subject()", "self.assertEqual(getargspec(fn), (('a',), None, None, None)) def it_returns_argspec_of_wrapped_function_with_CallOnce(self): fn = CallOnce(lambda a: 0) self.assertEqual(getargspec(fn),", "empty): self.bake = roflcopter self.assertTrue(func_equal(Foo, FooBar)) print 'equal' self.assertFalse(func_equal(Foo, ABC)) self.assertFalse(func_equal(Foo, Cake)) @patch('describe.spec.utils.get_true_function')", "def it_should_return_traceback_if_its_not_a_traceback_type(self): tb = 'bar' self.assertEqual(filter_traceback(Mock(), tb), \"bar\") #class DescribeFnReturnsLocals(TestCase): # def test_it_captures_locals_from_function(self):", "#class DescribeFnReturnsLocals(TestCase): # def test_it_captures_locals_from_function(self): # def foo(): # a = 2 #", "def __init__(self, bar): self.bar = bar class FooBar(object): def __init__(self, cake): self.bar =", "def test_it_compares_function_equality(self): def add(a, b): return a + b def new_add(a, b): return", "subject = CallOnce(None) self.assertEqual(subject, CallOnce(None)) def test_its_equal_with_like_function(self): subject = CallOnce(lambda:0) self.assertEqual(subject, CallOnce(lambda:0)) def", "return a + b def sub(a, b): return a - b def sub_const(a):", "Benchmark() with timer: time.sleep(0.02) with timer: time.sleep(0.02) with timer: time.sleep(0.02) with timer: time.sleep(0.02)", "def it_returns_argspec_of_class_constructor(self): class Foo(object): def __init__(self, f): pass self.assertEqual(getargspec(Foo), (('f',), None, None, None))", "= Mock() subject = CallOnce(m) subject() subject() subject() m.assert_called_once_with() def test_it_does_nothing_for_wrapping_None(self): subject =", "'my fn doc') self.assertEqual(subject.__name__, 'my_func') self.assertEqual(subject.__module__, 'super.awesome.module') self.assertEqual(subject.func_name, 'my_fn') 
self.assertEqual(subject.func_code, m.func_code) class TestFuncEqual(TestCase):", "from unittest import TestCase from StringIO import StringIO from functools import wraps from", "return wrapper # return decorator # # @d('lol') # def foo(name): # a", "self.assertFalse(accepts_arg(foo)) def test_it_returns_False_when_non_function(self): self.assertFalse(accepts_arg(None)) class DescribeIntegrationGetArgSpec(TestCase): def it_returns_argspec_of_functions(self): fn = lambda a: 0", "= False target = tb.tb_next.tb_next.tb_next target.tb_frame.f_globals.__contains__.return_value = True format_exception.return_value = 'foo' self.assertEqual(filter_traceback(error, tb),", "self.assertFalse(func_equal(Foo, Cake)) @patch('describe.spec.utils.get_true_function') def test_it_compares_callables(self, getfn): getfn.side_effect = lambda o: (o.__call__, None) class", "None)) class DescribeCallOnce(TestCase): def test_it_can_call_wrapped_fn_once(self): m = Mock() subject = CallOnce(m) subject() subject()", "doc' m.__name__ = 'my_func' m.__module__ = 'super.awesome.module' m.func_name = 'my_fn' m.func_code = Mock()", "test_tabulation_does_not_insert_spaces_between_double_newlines(self): self.assertEqual(tabulate('\\n\\nfoo'), '\\n\\n foo') def test_tabulation_ignores_first_line(self): self.assertEqual(tabulate('foo\\nbar', ignore_first=True), 'foo\\n bar') def test_tabulation_by_times(self): self.assertEqual(tabulate('\\n\\nfoo',", "*args, **kwargs) # return wrapper # return decorator # # @d('lol') # def", "# methods = [ # 'it_should_read_submethods', # 'before_each', # 'before_all', # 'after_each', #", "def test_tabulation_of_string(self): self.assertEqual(tabulate('foo\\nbar'), ' foo\\n bar') def test_tabulation_does_not_insert_spaces_between_double_newlines(self): self.assertEqual(tabulate('\\n\\nfoo'), '\\n\\n foo') def test_tabulation_ignores_first_line(self):", "patch from describe.spec.utils import (tabulate, Benchmark, CallOnce, getargspec, func_equal, accepts_arg, 
filter_traceback) class DescribeFilteredTraceback(TestCase):", "def before_all(): pass # def sample_func(): pass # def after_each(): pass # def", "format_exception): error = MagicMock(spec=Exception) tb = Mock(spec=TracebackType) tb.__contains__.return_value = False target = tb.tb_next.tb_next.tb_next", "'a': 2, # 'b': 'foo', # 'z': {}, # }) # # def", "test_its_truthiness_if_wrapped_is_callable(self): subject = CallOnce(lambda:0) self.assertTrue(bool(subject)) def test_it_preserves_function_attributes(self): m = Mock() m.__doc__ = 'my", "test_reraises_any_exceptions_thrown(self): # def describe_spec(): # @does_not_exist # def it_should_do_stuff(): pass # with self.assertRaises(NameError):", "b def sub_const(a): return a - 2 self.assertTrue(func_equal(add, new_add)) self.assertFalse(func_equal(new_add, sub)) self.assertFalse(func_equal(sub, sub_const))", "methods = [ # 'it_should_read_submethods', # 'before_each', # 'before_all', # 'after_each', # 'after_all',", "def test_tabulation_by_zero_times(self): self.assertEqual(tabulate('\\n\\nfoo', times=0), '\\n\\nfoo') # class TestLocalsFromFunction(TestCase): # def test_extracts_local_functions_with_invocation(self): # def", "z = {} # # fn = returns_locals(foo) # context = fn() #", "CallOnce(None) subject() subject() def test_its_equal_with_None(self): subject = CallOnce(None) self.assertEqual(subject, CallOnce(None)) def test_its_equal_with_like_function(self): subject", "test_its_truthiness_if_wrapped_is_callable(self): subject = CallOnce(object()) self.assertFalse(bool(subject)) def test_its_truthiness_if_wrapped_is_callable(self): subject = CallOnce(lambda:0) self.assertTrue(bool(subject)) def test_it_preserves_function_attributes(self):", "# 'before_each', # 'before_all', # 'after_each', # 'after_all', # 'it_should_capture_this_method', # 'sample_func', #", "self.assertTrue(accepts_arg(Foobar().foo)) def test_it_returns_False_otherwise(self): class Foobar(object): def foo(self): pass def foo(): pass 
self.assertFalse(accepts_arg(Foobar().foo)) self.assertFalse(accepts_arg(foo))", "# return decorator # # @d('lol') # def foo(name): # a = 'foo'", "self.assertFalse(func_equal(new_add, sub)) self.assertFalse(func_equal(sub, sub_const)) @patch('describe.spec.utils.get_true_function') def test_it_compares_class_constructors(self, getfn): getfn.side_effect = lambda o: (o.__init__,", "del context['sys'] # del context['_________describe_exception'] # self.assertEqual(context, { # 'a': 'foo', # 'b':", "self.assertTrue(func_equal(Foo, FooBar)) print 'equal' self.assertFalse(func_equal(Foo, ABC)) self.assertFalse(func_equal(Foo, Cake)) @patch('describe.spec.utils.get_true_function') def test_it_compares_callables(self, getfn): getfn.side_effect", "= CallOnce(lambda:0) self.assertEqual(subject, CallOnce(lambda:0)) def test_its_truthiness_if_wrapped_is_callable(self): subject = CallOnce(object()) self.assertFalse(bool(subject)) def test_its_truthiness_if_wrapped_is_callable(self): subject", "def test_it_compares_class_constructors(self, getfn): getfn.side_effect = lambda o: (o.__init__, None) class Foo(object): def __init__(self,", "self.bake = roflcopter self.assertTrue(func_equal(Foo, FooBar)) print 'equal' self.assertFalse(func_equal(Foo, ABC)) self.assertFalse(func_equal(Foo, Cake)) @patch('describe.spec.utils.get_true_function') def", "self.assertEqual(getargspec(Foo), (('f',), None, None, None)) def it_returns_argspec_of_class_call_magicmethod(self): class Foo(object): def __call__(self, f): pass", "locals_from_function(describe_spec) class TestBenchmark(TestCase): def test_benchmark(self): import time timer = Benchmark() with timer: time.sleep(0.1)", "self.assertFalse(func_equal(lambda:2, lambda:1)) def test_it_compares_function_equality(self): def add(a, b): return a + b def new_add(a,", "= Mock() m.__doc__ = 'my fn doc' m.__name__ = 'my_func' m.__module__ = 'super.awesome.module'", "def foo(a): pass self.assertTrue(accepts_arg(foo)) def 
test_it_returns_True_for_class_method_with_one_arg(self): class Foobar(object): def foo(self, a): pass self.assertTrue(accepts_arg(Foobar().foo))", "def __init__(self, f): pass self.assertEqual(getargspec(Foo), (('f',), None, None, None)) def it_returns_argspec_of_class_call_magicmethod(self): class Foo(object):", "after_each(): pass # def after_all(): pass # def it_should_capture_this_method(): pass # context =", "class Cake(object): def __init__(self, roflcopter, empty): self.bake = roflcopter self.assertTrue(func_equal(Foo, FooBar)) print 'equal'", "test_tabulation_of_string(self): self.assertEqual(tabulate('foo\\nbar'), ' foo\\n bar') def test_tabulation_does_not_insert_spaces_between_double_newlines(self): self.assertEqual(tabulate('\\n\\nfoo'), '\\n\\n foo') def test_tabulation_ignores_first_line(self): self.assertEqual(tabulate('foo\\nbar',", "def decorator(fn): # @wraps(fn) # def wrapper(*args, **kwargs): # return fn(name, *args, **kwargs)", "it_should_capture_this_method(): pass # context = locals_from_function(describe_spec) # methods = [ # 'it_should_read_submethods', #", "fn = lambda a: 0 self.assertEqual(getargspec(fn), (('a',), None, None, None)) def it_returns_argspec_of_class_constructor(self): class", "test_benchmark(self): import time timer = Benchmark() with timer: time.sleep(0.1) self.assertTrue(timer.history[-1] > 0.09) def", "Mock(spec=TracebackType) tb.__contains__.return_value = False target = tb.tb_next.tb_next.tb_next target.tb_frame.f_globals.__contains__.return_value = True format_exception.return_value = 'foo'", "pass # def after_all(): pass # def it_should_capture_this_method(): pass # context = locals_from_function(describe_spec)", "# self.assertEqual(context, { # 'a': 2, # 'b': 'foo', # 'z': {}, #", "__call__(self, a, b): return a + self.b class Cake(object): def __call__(self, c, b):", "test_extracts_local_functions_with_invocation(self): # def describe_spec(): # lol = True # def it_should_read_submethods(): pass #", "import time 
timer = Benchmark() with timer: time.sleep(0.1) self.assertTrue(timer.history[-1] > 0.09) def test_benchmark_multiple(self):", "test_it_preserves_function_attributes(self): m = Mock() m.__doc__ = 'my fn doc' m.__name__ = 'my_func' m.__module__", "'before_each', # 'before_all', # 'after_each', # 'after_all', # 'it_should_capture_this_method', # 'sample_func', # ]", "CallOnce(m) self.assertEqual(subject.__doc__, 'my fn doc') self.assertEqual(subject.__name__, 'my_func') self.assertEqual(subject.__module__, 'super.awesome.module') self.assertEqual(subject.func_name, 'my_fn') self.assertEqual(subject.func_code, m.func_code)", "# def before_all(): pass # def sample_func(): pass # def after_each(): pass #", "class ABC(object): def __init__(self, cake, bar): self.cake = cake self.bar() class Cake(object): def", "self.b class Cake(object): def __call__(self, c, b): return c + b self.assertFalse(func_equal(Foo(), FooBar()))", "Foo(object): def __init__(self, bar): self.bar = bar class FooBar(object): def __init__(self, cake): self.bar", "def __init__(self, cake, bar): self.cake = cake self.bar() class Cake(object): def __init__(self, roflcopter,", "returns_locals(foo) # context = fn() # del context['sys'] # del context['_________describe_exception'] # self.assertEqual(context,", "__init__(self, roflcopter, empty): self.bake = roflcopter self.assertTrue(func_equal(Foo, FooBar)) print 'equal' self.assertFalse(func_equal(Foo, ABC)) self.assertFalse(func_equal(Foo,", "def test_it_returns_True_for_function_with_one_arg(self): def foo(a): pass self.assertTrue(accepts_arg(foo)) def test_it_returns_True_for_class_method_with_one_arg(self): class Foobar(object): def foo(self, a):", "def it_should_do_stuff(): pass # with self.assertRaises(NameError): # locals_from_function(describe_spec) class TestBenchmark(TestCase): def test_benchmark(self): import", "self.assertEqual(subject.__module__, 'super.awesome.module') self.assertEqual(subject.func_name, 'my_fn') 
self.assertEqual(subject.func_code, m.func_code) class TestFuncEqual(TestCase): def test_it_compares_lambda_function_equality(self): self.assertTrue(func_equal(lambda:0, lambda:0)) self.assertTrue(func_equal(lambda:1,", "del context['_________describe_exception'] # self.assertEqual(context, { # 'a': 'foo', # 'b': 3, # 'name':", "# class TestLocalsFromFunction(TestCase): # def test_extracts_local_functions_with_invocation(self): # def describe_spec(): # lol = True", "import Mock, patch from describe.spec.utils import (tabulate, Benchmark, CallOnce, getargspec, func_equal, accepts_arg, filter_traceback)", "describe.spec.utils import (tabulate, Benchmark, CallOnce, getargspec, func_equal, accepts_arg, filter_traceback) class DescribeFilteredTraceback(TestCase): @patch('traceback.format_exception') def", "0) self.assertEqual(getargspec(fn), (('a',), None, None, None)) def it_returns_argspec_of_wrapped_function_with_CallOnce(self): fn = CallOnce(lambda a: 0)", "test_its_equal_with_None(self): subject = CallOnce(None) self.assertEqual(subject, CallOnce(None)) def test_its_equal_with_like_function(self): subject = CallOnce(lambda:0) self.assertEqual(subject, CallOnce(lambda:0))", "m = Mock() m.__doc__ = 'my fn doc' m.__name__ = 'my_func' m.__module__ =", "sub_const)) @patch('describe.spec.utils.get_true_function') def test_it_compares_class_constructors(self, getfn): getfn.side_effect = lambda o: (o.__init__, None) class Foo(object):", "# def it_should_capture_this_method(): pass # context = locals_from_function(describe_spec) # methods = [ #", "(('f',), None, None, None)) def it_returns_argspec_of_class_call_magicmethod(self): class Foo(object): def __call__(self, f): pass self.assertEqual(getargspec(Foo),", "b def sub(a, b): return a - b def sub_const(a): return a -", "CallOnce(lambda a: 0) self.assertEqual(getargspec(fn), (('a',), None, None, None)) class DescribeCallOnce(TestCase): def test_it_can_call_wrapped_fn_once(self): m", "def foo(name): # a = 'foo' # b = 3 # # 
func", "pass # context = locals_from_function(describe_spec) # methods = [ # 'it_should_read_submethods', # 'before_each',", "tb = Mock(spec=TracebackType) tb.__contains__.return_value = False target = tb.tb_next.tb_next.tb_next target.tb_frame.f_globals.__contains__.return_value = True format_exception.return_value", "Foo(object): def __init__(self, f): pass self.assertEqual(getargspec(Foo), (('f',), None, None, None)) def it_returns_argspec_of_class_call_magicmethod(self): class", "def sample_func(): pass # def after_each(): pass # def after_all(): pass # def", "FooBar)) print 'equal' self.assertFalse(func_equal(Foo, ABC)) self.assertFalse(func_equal(Foo, Cake)) @patch('describe.spec.utils.get_true_function') def test_it_compares_callables(self, getfn): getfn.side_effect =", "import wraps from mock import Mock, patch from describe.spec.utils import (tabulate, Benchmark, CallOnce,", "c, b): return c + b self.assertFalse(func_equal(Foo(), FooBar())) self.assertTrue(func_equal(Foo(), Cake())) class TestTabulate(TestCase): def", "pass self.assertTrue(accepts_arg(foo)) def test_it_returns_True_for_class_method_with_one_arg(self): class Foobar(object): def foo(self, a): pass self.assertTrue(accepts_arg(Foobar().foo)) def test_it_returns_False_otherwise(self):", "None)) def it_returns_argspec_of_wrapped_function_with_CallOnce(self): fn = CallOnce(lambda a: 0) self.assertEqual(getargspec(fn), (('a',), None, None, None))", "__call__(self, a, b): return a + b class FooBar(object): def __call__(self, a, b):", "wraps from mock import Mock, patch from describe.spec.utils import (tabulate, Benchmark, CallOnce, getargspec,", "# 'it_should_read_submethods', # 'before_each', # 'before_all', # 'after_each', # 'after_all', # 'it_should_capture_this_method', #", "0 self.assertEqual(getargspec(fn), (('a',), None, None, None)) def it_returns_argspec_of_class_constructor(self): class Foo(object): def __init__(self, f):", "'\\n\\n foo') def test_tabulation_ignores_first_line(self): 
self.assertEqual(tabulate('foo\\nbar', ignore_first=True), 'foo\\n bar') def test_tabulation_by_times(self): self.assertEqual(tabulate('\\n\\nfoo', times=2), '\\n\\n", "ABC(object): def __init__(self, cake, bar): self.cake = cake self.bar() class Cake(object): def __init__(self,", "def it_should_stop_emitting_when_marker_is_found(self, format_exception): error = MagicMock(spec=Exception) tb = Mock(spec=TracebackType) tb.__contains__.return_value = False target", "it_should_stop_emitting_when_marker_is_found(self, format_exception): error = MagicMock(spec=Exception) tb = Mock(spec=TracebackType) tb.__contains__.return_value = False target =", "lambda:1)) self.assertFalse(func_equal(lambda:2, lambda:1)) def test_it_compares_function_equality(self): def add(a, b): return a + b def", "pass self.assertEqual(getargspec(Foo), (('f',), None, None, None)) def it_returns_argspec_of_wrapped_function(self): fn = wraps(lambda a: 0)", "None) class Foo(object): def __init__(self, bar): self.bar = bar class FooBar(object): def __init__(self,", "return a - b def sub_const(a): return a - 2 self.assertTrue(func_equal(add, new_add)) self.assertFalse(func_equal(new_add,", "= 'my fn doc' m.__name__ = 'my_func' m.__module__ = 'super.awesome.module' m.func_name = 'my_fn'", "class Foo(object): def __call__(self, a, b): return a + b class FooBar(object): def", "def __call__(self, a, b): return a + self.b class Cake(object): def __call__(self, c,", "self.assertEqual(filter_traceback(Mock(), tb), \"bar\") #class DescribeFnReturnsLocals(TestCase): # def test_it_captures_locals_from_function(self): # def foo(): # a", "m.__module__ = 'super.awesome.module' m.func_name = 'my_fn' m.func_code = Mock() subject = CallOnce(m) self.assertEqual(subject.__doc__,", "before_each(): pass # def before_all(): pass # def sample_func(): pass # def after_each():", "self.assertTrue(timer.history[-1] > 0.09) def test_benchmark_multiple(self): import time timer = Benchmark() with timer: time.sleep(0.02)", "\"foo\") 
format_exception.assert_called_once_with(Exception, error, target) def it_should_return_traceback_if_its_not_a_traceback_type(self): tb = 'bar' self.assertEqual(filter_traceback(Mock(), tb), \"bar\") #class", "a + self.b class Cake(object): def __call__(self, c, b): return c + b", "Benchmark, CallOnce, getargspec, func_equal, accepts_arg, filter_traceback) class DescribeFilteredTraceback(TestCase): @patch('traceback.format_exception') def it_should_stop_emitting_when_marker_is_found(self, format_exception): error", "self.bar() class Cake(object): def __init__(self, roflcopter, empty): self.bake = roflcopter self.assertTrue(func_equal(Foo, FooBar)) print", "None, None, None)) class DescribeCallOnce(TestCase): def test_it_can_call_wrapped_fn_once(self): m = Mock() subject = CallOnce(m)", "'foo', # 'z': {}, # }) # # def test_it_captures_locals_from_decorated_function(self): # def d(name):", "pass def foo(): pass self.assertFalse(accepts_arg(Foobar().foo)) self.assertFalse(accepts_arg(foo)) def test_it_returns_False_when_non_function(self): self.assertFalse(accepts_arg(None)) class DescribeIntegrationGetArgSpec(TestCase): def it_returns_argspec_of_functions(self):", "self.assertEqual(tabulate('foo\\nbar'), ' foo\\n bar') def test_tabulation_does_not_insert_spaces_between_double_newlines(self): self.assertEqual(tabulate('\\n\\nfoo'), '\\n\\n foo') def test_tabulation_ignores_first_line(self): self.assertEqual(tabulate('foo\\nbar', ignore_first=True),", "def test_its_truthiness_if_wrapped_is_callable(self): subject = CallOnce(lambda:0) self.assertTrue(bool(subject)) def test_it_preserves_function_attributes(self): m = Mock() m.__doc__ =", "b class FooBar(object): def __call__(self, a, b): return a + self.b class Cake(object):", "= returns_locals(foo) # context = func() # del context['sys'] # del context['_________describe_exception'] #", "'super.awesome.module' m.func_name = 'my_fn' m.func_code = Mock() subject = CallOnce(m) self.assertEqual(subject.__doc__, 'my fn", 
"b): return a - b def sub_const(a): return a - 2 self.assertTrue(func_equal(add, new_add))", "# def it_should_do_stuff(): pass # with self.assertRaises(NameError): # locals_from_function(describe_spec) class TestBenchmark(TestCase): def test_benchmark(self):", "class DescribeAcceptArgs(TestCase): def test_it_returns_True_for_function_with_one_arg(self): def foo(a): pass self.assertTrue(accepts_arg(foo)) def test_it_returns_True_for_class_method_with_one_arg(self): class Foobar(object): def", "'name': 'lol', # }) class DescribeAcceptArgs(TestCase): def test_it_returns_True_for_function_with_one_arg(self): def foo(a): pass self.assertTrue(accepts_arg(foo)) def", "self.assertFalse(func_equal(Foo(), FooBar())) self.assertTrue(func_equal(Foo(), Cake())) class TestTabulate(TestCase): def test_tabulation_of_string(self): self.assertEqual(tabulate('foo\\nbar'), ' foo\\n bar') def", "subject = CallOnce(m) subject() subject() subject() m.assert_called_once_with() def test_it_does_nothing_for_wrapping_None(self): subject = CallOnce(None) subject()", "f): pass self.assertEqual(getargspec(Foo), (('f',), None, None, None)) def it_returns_argspec_of_wrapped_function(self): fn = wraps(lambda a:", "subject = CallOnce(m) self.assertEqual(subject.__doc__, 'my fn doc') self.assertEqual(subject.__name__, 'my_func') self.assertEqual(subject.__module__, 'super.awesome.module') self.assertEqual(subject.func_name, 'my_fn')", "CallOnce(lambda:0) self.assertTrue(bool(subject)) def test_it_preserves_function_attributes(self): m = Mock() m.__doc__ = 'my fn doc' m.__name__", "Cake())) class TestTabulate(TestCase): def test_tabulation_of_string(self): self.assertEqual(tabulate('foo\\nbar'), ' foo\\n bar') def test_tabulation_does_not_insert_spaces_between_double_newlines(self): self.assertEqual(tabulate('\\n\\nfoo'), '\\n\\n", "# def after_all(): pass # def it_should_capture_this_method(): pass # context = locals_from_function(describe_spec) #", "= 'bar' self.assertEqual(filter_traceback(Mock(), 
tb), \"bar\") #class DescribeFnReturnsLocals(TestCase): # def test_it_captures_locals_from_function(self): # def foo():", "sub)) self.assertFalse(func_equal(sub, sub_const)) @patch('describe.spec.utils.get_true_function') def test_it_compares_class_constructors(self, getfn): getfn.side_effect = lambda o: (o.__init__, None)", "times=2), '\\n\\n foo') def test_tabulation_by_zero_times(self): self.assertEqual(tabulate('\\n\\nfoo', times=0), '\\n\\nfoo') # class TestLocalsFromFunction(TestCase): # def", "# func = returns_locals(foo) # context = func() # del context['sys'] # del", "# @with_metadata # def decorator(fn): # @wraps(fn) # def wrapper(*args, **kwargs): # return", "# def test_extracts_local_functions_with_invocation(self): # def describe_spec(): # lol = True # def it_should_read_submethods():", "TestFuncEqual(TestCase): def test_it_compares_lambda_function_equality(self): self.assertTrue(func_equal(lambda:0, lambda:0)) self.assertTrue(func_equal(lambda:1, lambda:1)) self.assertFalse(func_equal(lambda:2, lambda:1)) def test_it_compares_function_equality(self): def add(a,", "def __call__(self, a, b): return a + b class FooBar(object): def __call__(self, a,", "(('a',), None, None, None)) def it_returns_argspec_of_class_constructor(self): class Foo(object): def __init__(self, f): pass self.assertEqual(getargspec(Foo),", "'\\n\\n foo') def test_tabulation_by_zero_times(self): self.assertEqual(tabulate('\\n\\nfoo', times=0), '\\n\\nfoo') # class TestLocalsFromFunction(TestCase): # def test_extracts_local_functions_with_invocation(self):", "test_it_captures_locals_from_decorated_function(self): # def d(name): # @with_metadata # def decorator(fn): # @wraps(fn) # def", "with self.assertRaises(NameError): # locals_from_function(describe_spec) class TestBenchmark(TestCase): def test_benchmark(self): import time timer = Benchmark()", "sys from unittest import TestCase from StringIO import StringIO from functools import wraps", "format_exception.return_value = 'foo' 
self.assertEqual(filter_traceback(error, tb), \"foo\") format_exception.assert_called_once_with(Exception, error, target) def it_should_return_traceback_if_its_not_a_traceback_type(self): tb =", "time.sleep(0.02) with timer: time.sleep(0.02) with timer: time.sleep(0.02) with timer: time.sleep(0.02) with timer: time.sleep(0.02)", "roflcopter, empty): self.bake = roflcopter self.assertTrue(func_equal(Foo, FooBar)) print 'equal' self.assertFalse(func_equal(Foo, ABC)) self.assertFalse(func_equal(Foo, Cake))", "foo\\n bar') def test_tabulation_does_not_insert_spaces_between_double_newlines(self): self.assertEqual(tabulate('\\n\\nfoo'), '\\n\\n foo') def test_tabulation_ignores_first_line(self): self.assertEqual(tabulate('foo\\nbar', ignore_first=True), 'foo\\n bar')", "fn doc') self.assertEqual(subject.__name__, 'my_func') self.assertEqual(subject.__module__, 'super.awesome.module') self.assertEqual(subject.func_name, 'my_fn') self.assertEqual(subject.func_code, m.func_code) class TestFuncEqual(TestCase): def", "return c + b self.assertFalse(func_equal(Foo(), FooBar())) self.assertTrue(func_equal(Foo(), Cake())) class TestTabulate(TestCase): def test_tabulation_of_string(self): self.assertEqual(tabulate('foo\\nbar'),", "Benchmark() with timer: time.sleep(0.1) self.assertTrue(timer.history[-1] > 0.09) def test_benchmark_multiple(self): import time timer =", "c + b self.assertFalse(func_equal(Foo(), FooBar())) self.assertTrue(func_equal(Foo(), Cake())) class TestTabulate(TestCase): def test_tabulation_of_string(self): self.assertEqual(tabulate('foo\\nbar'), '", "= 'foo' # b = 3 # # func = returns_locals(foo) # context", "tb.tb_next.tb_next.tb_next target.tb_frame.f_globals.__contains__.return_value = True format_exception.return_value = 'foo' self.assertEqual(filter_traceback(error, tb), \"foo\") format_exception.assert_called_once_with(Exception, error, target)", "it_returns_argspec_of_wrapped_function(self): fn = wraps(lambda a: 0) self.assertEqual(getargspec(fn), 
(('a',), None, None, None)) def it_returns_argspec_of_wrapped_function_with_CallOnce(self):", "def __init__(self, roflcopter, empty): self.bake = roflcopter self.assertTrue(func_equal(Foo, FooBar)) print 'equal' self.assertFalse(func_equal(Foo, ABC))", "None)) def it_returns_argspec_of_wrapped_function(self): fn = wraps(lambda a: 0) self.assertEqual(getargspec(fn), (('a',), None, None, None))", "m.func_name = 'my_fn' m.func_code = Mock() subject = CallOnce(m) self.assertEqual(subject.__doc__, 'my fn doc')", "= CallOnce(m) self.assertEqual(subject.__doc__, 'my fn doc') self.assertEqual(subject.__name__, 'my_func') self.assertEqual(subject.__module__, 'super.awesome.module') self.assertEqual(subject.func_name, 'my_fn') self.assertEqual(subject.func_code,", "# ] # self.assertEqual(set(context.keys()), set(methods)) # def test_reraises_any_exceptions_thrown(self): # def describe_spec(): # @does_not_exist", "subject = CallOnce(None) subject() subject() def test_its_equal_with_None(self): subject = CallOnce(None) self.assertEqual(subject, CallOnce(None)) def", "self.assertTrue(func_equal(lambda:1, lambda:1)) self.assertFalse(func_equal(lambda:2, lambda:1)) def test_it_compares_function_equality(self): def add(a, b): return a + b", "with timer: time.sleep(0.02) with timer: time.sleep(0.02) with timer: time.sleep(0.02) with timer: time.sleep(0.02) with", "None)) def it_returns_argspec_of_class_call_magicmethod(self): class Foo(object): def __call__(self, f): pass self.assertEqual(getargspec(Foo), (('f',), None, None,", "test_it_does_nothing_for_wrapping_None(self): subject = CallOnce(None) subject() subject() def test_its_equal_with_None(self): subject = CallOnce(None) self.assertEqual(subject, CallOnce(None))", "lambda o: (o.__call__, None) class Foo(object): def __call__(self, a, b): return a +", "b self.assertFalse(func_equal(Foo(), FooBar())) self.assertTrue(func_equal(Foo(), Cake())) class TestTabulate(TestCase): def test_tabulation_of_string(self): 
self.assertEqual(tabulate('foo\\nbar'), ' foo\\n bar')", "pass # def after_each(): pass # def after_all(): pass # def it_should_capture_this_method(): pass", "__init__(self, cake, bar): self.cake = cake self.bar() class Cake(object): def __init__(self, roflcopter, empty):", "None, None)) def it_returns_argspec_of_class_constructor(self): class Foo(object): def __init__(self, f): pass self.assertEqual(getargspec(Foo), (('f',), None,", "'after_all', # 'it_should_capture_this_method', # 'sample_func', # ] # self.assertEqual(set(context.keys()), set(methods)) # def test_reraises_any_exceptions_thrown(self):", "DescribeAcceptArgs(TestCase): def test_it_returns_True_for_function_with_one_arg(self): def foo(a): pass self.assertTrue(accepts_arg(foo)) def test_it_returns_True_for_class_method_with_one_arg(self): class Foobar(object): def foo(self,", "self.assertEqual(context, { # 'a': 'foo', # 'b': 3, # 'name': 'lol', # })", "after_all(): pass # def it_should_capture_this_method(): pass # context = locals_from_function(describe_spec) # methods =" ]
[ "if is_gpio: GPIO.setmode(GPIO.BCM) GPIO.setup(24, GPIO.OUT) GPIO.setup(27, GPIO.OUT) GPIO.setup(10, GPIO.OUT) pwm_r = GPIO.PWM(24, 100)", "0) current_loop = [] if is_gpio: GPIO.setmode(GPIO.BCM) GPIO.setup(24, GPIO.OUT) GPIO.setup(27, GPIO.OUT) GPIO.setup(10, GPIO.OUT)", "list(map(int, re.findall('\\d+', part))) }) else: print('Invalid part :', part) return loop def loop():", "in current_loop: if not current_loop or not module.connected: break match part['type']: case 'c':", "= load_loop(data['loop']) print(' > Event \"loop\" received :', current_loop) loop() def load_loop(loop_data): loop", "part in current_loop: if not current_loop or not module.connected: break match part['type']: case", "[] if is_gpio: GPIO.setmode(GPIO.BCM) GPIO.setup(24, GPIO.OUT) GPIO.setup(27, GPIO.OUT) GPIO.setup(10, GPIO.OUT) pwm_r = GPIO.PWM(24,", "- now) / part['data'][3]) - 1) set_color(start_color.mix(end_color, mix)) def set_color(color): current_color.set_color(color) # print(color.to_array())", "'type': 'w', 'data': list(map(int, re.findall('\\d+', part))) }) elif re.match('t\\(\\d{1,3},\\d{1,3},\\d{1,3},\\d+\\)', part): loop.append({ 'type': 't',", "re.findall('\\d+', part))) }) elif re.match('t\\(\\d{1,3},\\d{1,3},\\d{1,3},\\d+\\)', part): loop.append({ 'type': 't', 'data': list(map(int, re.findall('\\d+', part)))", "current_color = color.Color(0, 0, 0) current_loop = [] if is_gpio: GPIO.setmode(GPIO.BCM) GPIO.setup(24, GPIO.OUT)", "if is_gpio: pwm_r.ChangeDutyCycle(color.r * (100 / 255)) pwm_g.ChangeDutyCycle(color.g * (100 / 255)) pwm_b.ChangeDutyCycle(color.b", "is_gpio: pwm_r.ChangeDutyCycle(color.r * (100 / 255)) pwm_g.ChangeDutyCycle(color.g * (100 / 255)) pwm_b.ChangeDutyCycle(color.b *", "re import time from modules.utils import ghc, color, project try: import RPi.GPIO as", "data['green'], data['blue']) print(' > Event \"color\" received :', c.to_array()) set_color(c) @module.listening('loop') def loop_listener(data):", "as GPIO is_gpio = True except Exception: is_gpio 
= False print('Running module without", "'w', 'data': list(map(int, re.findall('\\d+', part))) }) elif re.match('t\\(\\d{1,3},\\d{1,3},\\d{1,3},\\d+\\)', part): loop.append({ 'type': 't', 'data':", "current_loop current_loop = [] c = color.Color(data['red'], data['green'], data['blue']) print(' > Event \"color\"", ":', c.to_array()) set_color(c) @module.listening('loop') def loop_listener(data): global current_loop current_loop = [] if 'loop'", "part['type']: case 'c': set_color(color.Color(part['data'][0], part['data'][1], part['data'][2])) case 'w': time.sleep(part['data'][0] / 1000) case 't':", "RPi.GPIO as GPIO is_gpio = True except Exception: is_gpio = False print('Running module", "[] if 'loop' in data: current_loop = load_loop(data['loop']) print(' > Event \"loop\" received", "module without GPIO') module = ghc.Module(project.ModuleType.LED_STRIP, project.Paths.API_URL.value, project.Paths.WEBSOCKET_URL.value) current_color = color.Color(0, 0, 0)", "loop.append({ 'type': 'c', 'data': list(map(int, re.findall('\\d+', part))) }) elif re.match('w\\(\\d+\\)', part): loop.append({ 'type':", "= now + part['data'][3] start_color = current_color.copy() end_color = color.Color(part['data'][0], part['data'][1], part['data'][2]) while", "start_color = current_color.copy() end_color = color.Color(part['data'][0], part['data'][1], part['data'][2]) while now < next: if", "current_loop or not module.connected: break now = time.time() * 1000 mix = abs(((next", "and module.connected: for part in current_loop: if not current_loop or not module.connected: break", "255)) pwm_b.ChangeDutyCycle(color.b * (100 / 255)) module.connect() module.wait() if is_gpio: pwm_r.stop() pwm_g.stop() pwm_b.stop()", "= GPIO.PWM(27, 100) pwm_b = GPIO.PWM(10, 100) pwm_r.start(0) pwm_g.start(0) pwm_b.start(0) @module.listening('color') def color_listener(data):", "False print('Running module without GPIO') module = ghc.Module(project.ModuleType.LED_STRIP, project.Paths.API_URL.value, 
project.Paths.WEBSOCKET_URL.value) current_color = color.Color(0,", "next = now + part['data'][3] start_color = current_color.copy() end_color = color.Color(part['data'][0], part['data'][1], part['data'][2])", "@module.listening('color') def color_listener(data): global current_loop current_loop = [] c = color.Color(data['red'], data['green'], data['blue'])", "loop.append({ 'type': 'w', 'data': list(map(int, re.findall('\\d+', part))) }) elif re.match('t\\(\\d{1,3},\\d{1,3},\\d{1,3},\\d+\\)', part): loop.append({ 'type':", "break now = time.time() * 1000 mix = abs(((next - now) / part['data'][3])", "current_color.copy() end_color = color.Color(part['data'][0], part['data'][1], part['data'][2]) while now < next: if not current_loop", "current_loop = [] if is_gpio: GPIO.setmode(GPIO.BCM) GPIO.setup(24, GPIO.OUT) GPIO.setup(27, GPIO.OUT) GPIO.setup(10, GPIO.OUT) pwm_r", "project try: import RPi.GPIO as GPIO is_gpio = True except Exception: is_gpio =", "= time.time() * 1000 next = now + part['data'][3] start_color = current_color.copy() end_color", "part['data'][2]) while now < next: if not current_loop or not module.connected: break now", "set_color(color.Color(part['data'][0], part['data'][1], part['data'][2])) case 'w': time.sleep(part['data'][0] / 1000) case 't': now = time.time()", "}) elif re.match('w\\(\\d+\\)', part): loop.append({ 'type': 'w', 'data': list(map(int, re.findall('\\d+', part))) }) elif", "* (100 / 255)) pwm_b.ChangeDutyCycle(color.b * (100 / 255)) module.connect() module.wait() if is_gpio:", "time.time() * 1000 mix = abs(((next - now) / part['data'][3]) - 1) set_color(start_color.mix(end_color,", "if not current_loop or not module.connected: break match part['type']: case 'c': set_color(color.Color(part['data'][0], part['data'][1],", "not current_loop or not module.connected: break match part['type']: case 'c': set_color(color.Color(part['data'][0], part['data'][1], part['data'][2]))", "load_loop(loop_data): loop = [] for part in 
loop_data.split('|'): if re.match('c\\(\\d{1,3},\\d{1,3},\\d{1,3}\\)', part): loop.append({ 'type':", "print(' > Event \"loop\" received :', current_loop) loop() def load_loop(loop_data): loop = []", "def load_loop(loop_data): loop = [] for part in loop_data.split('|'): if re.match('c\\(\\d{1,3},\\d{1,3},\\d{1,3}\\)', part): loop.append({", "'data': list(map(int, re.findall('\\d+', part))) }) elif re.match('t\\(\\d{1,3},\\d{1,3},\\d{1,3},\\d+\\)', part): loop.append({ 'type': 't', 'data': list(map(int,", ":', current_loop) loop() def load_loop(loop_data): loop = [] for part in loop_data.split('|'): if", "Exception: is_gpio = False print('Running module without GPIO') module = ghc.Module(project.ModuleType.LED_STRIP, project.Paths.API_URL.value, project.Paths.WEBSOCKET_URL.value)", "part['data'][1], part['data'][2])) case 'w': time.sleep(part['data'][0] / 1000) case 't': now = time.time() *", "def color_listener(data): global current_loop current_loop = [] c = color.Color(data['red'], data['green'], data['blue']) print('", "'type': 'c', 'data': list(map(int, re.findall('\\d+', part))) }) elif re.match('w\\(\\d+\\)', part): loop.append({ 'type': 'w',", "part['data'][2])) case 'w': time.sleep(part['data'][0] / 1000) case 't': now = time.time() * 1000", "'loop' in data: current_loop = load_loop(data['loop']) print(' > Event \"loop\" received :', current_loop)", "[] for part in loop_data.split('|'): if re.match('c\\(\\d{1,3},\\d{1,3},\\d{1,3}\\)', part): loop.append({ 'type': 'c', 'data': list(map(int,", "}) else: print('Invalid part :', part) return loop def loop(): while current_loop and", "GPIO.setup(10, GPIO.OUT) pwm_r = GPIO.PWM(24, 100) pwm_g = GPIO.PWM(27, 100) pwm_b = GPIO.PWM(10,", "= color.Color(data['red'], data['green'], data['blue']) print(' > Event \"color\" received :', c.to_array()) set_color(c) @module.listening('loop')", "loop() def load_loop(loop_data): loop = [] for part in loop_data.split('|'): if re.match('c\\(\\d{1,3},\\d{1,3},\\d{1,3}\\)', 
part):", "current_loop and module.connected: for part in current_loop: if not current_loop or not module.connected:", "= current_color.copy() end_color = color.Color(part['data'][0], part['data'][1], part['data'][2]) while now < next: if not", "Event \"loop\" received :', current_loop) loop() def load_loop(loop_data): loop = [] for part", ":', part) return loop def loop(): while current_loop and module.connected: for part in", "print('Running module without GPIO') module = ghc.Module(project.ModuleType.LED_STRIP, project.Paths.API_URL.value, project.Paths.WEBSOCKET_URL.value) current_color = color.Color(0, 0,", "break match part['type']: case 'c': set_color(color.Color(part['data'][0], part['data'][1], part['data'][2])) case 'w': time.sleep(part['data'][0] / 1000)", "in loop_data.split('|'): if re.match('c\\(\\d{1,3},\\d{1,3},\\d{1,3}\\)', part): loop.append({ 'type': 'c', 'data': list(map(int, re.findall('\\d+', part))) })", "= abs(((next - now) / part['data'][3]) - 1) set_color(start_color.mix(end_color, mix)) def set_color(color): current_color.set_color(color)", "print(' > Event \"color\" received :', c.to_array()) set_color(c) @module.listening('loop') def loop_listener(data): global current_loop", "'t': now = time.time() * 1000 next = now + part['data'][3] start_color =", "= color.Color(part['data'][0], part['data'][1], part['data'][2]) while now < next: if not current_loop or not", "part): loop.append({ 'type': 't', 'data': list(map(int, re.findall('\\d+', part))) }) else: print('Invalid part :',", "pwm_r = GPIO.PWM(24, 100) pwm_g = GPIO.PWM(27, 100) pwm_b = GPIO.PWM(10, 100) pwm_r.start(0)", "ghc, color, project try: import RPi.GPIO as GPIO is_gpio = True except Exception:", "import RPi.GPIO as GPIO is_gpio = True except Exception: is_gpio = False print('Running", "Event \"color\" received :', c.to_array()) set_color(c) @module.listening('loop') def loop_listener(data): global current_loop current_loop =", "255)) pwm_g.ChangeDutyCycle(color.g * (100 / 255)) 
pwm_b.ChangeDutyCycle(color.b * (100 / 255)) module.connect() module.wait()", "GPIO.OUT) GPIO.setup(10, GPIO.OUT) pwm_r = GPIO.PWM(24, 100) pwm_g = GPIO.PWM(27, 100) pwm_b =", "= GPIO.PWM(24, 100) pwm_g = GPIO.PWM(27, 100) pwm_b = GPIO.PWM(10, 100) pwm_r.start(0) pwm_g.start(0)", "current_loop or not module.connected: break match part['type']: case 'c': set_color(color.Color(part['data'][0], part['data'][1], part['data'][2])) case", "c = color.Color(data['red'], data['green'], data['blue']) print(' > Event \"color\" received :', c.to_array()) set_color(c)", "re.findall('\\d+', part))) }) elif re.match('w\\(\\d+\\)', part): loop.append({ 'type': 'w', 'data': list(map(int, re.findall('\\d+', part)))", "time.sleep(part['data'][0] / 1000) case 't': now = time.time() * 1000 next = now", "def set_color(color): current_color.set_color(color) # print(color.to_array()) if is_gpio: pwm_r.ChangeDutyCycle(color.r * (100 / 255)) pwm_g.ChangeDutyCycle(color.g", "loop_data.split('|'): if re.match('c\\(\\d{1,3},\\d{1,3},\\d{1,3}\\)', part): loop.append({ 'type': 'c', 'data': list(map(int, re.findall('\\d+', part))) }) elif", "'data': list(map(int, re.findall('\\d+', part))) }) else: print('Invalid part :', part) return loop def", "not module.connected: break now = time.time() * 1000 mix = abs(((next - now)", "re.match('c\\(\\d{1,3},\\d{1,3},\\d{1,3}\\)', part): loop.append({ 'type': 'c', 'data': list(map(int, re.findall('\\d+', part))) }) elif re.match('w\\(\\d+\\)', part):", "import ghc, color, project try: import RPi.GPIO as GPIO is_gpio = True except", "'w': time.sleep(part['data'][0] / 1000) case 't': now = time.time() * 1000 next =", "GPIO.setmode(GPIO.BCM) GPIO.setup(24, GPIO.OUT) GPIO.setup(27, GPIO.OUT) GPIO.setup(10, GPIO.OUT) pwm_r = GPIO.PWM(24, 100) pwm_g =", "+ part['data'][3] start_color = current_color.copy() end_color = color.Color(part['data'][0], part['data'][1], part['data'][2]) while now <", "now = time.time() * 1000 mix = abs(((next - now) / part['data'][3]) 
-", "'t', 'data': list(map(int, re.findall('\\d+', part))) }) else: print('Invalid part :', part) return loop", "pwm_g.ChangeDutyCycle(color.g * (100 / 255)) pwm_b.ChangeDutyCycle(color.b * (100 / 255)) module.connect() module.wait() if", "else: print('Invalid part :', part) return loop def loop(): while current_loop and module.connected:", "# print(color.to_array()) if is_gpio: pwm_r.ChangeDutyCycle(color.r * (100 / 255)) pwm_g.ChangeDutyCycle(color.g * (100 /", "GPIO.PWM(24, 100) pwm_g = GPIO.PWM(27, 100) pwm_b = GPIO.PWM(10, 100) pwm_r.start(0) pwm_g.start(0) pwm_b.start(0)", "'c', 'data': list(map(int, re.findall('\\d+', part))) }) elif re.match('w\\(\\d+\\)', part): loop.append({ 'type': 'w', 'data':", "'type': 't', 'data': list(map(int, re.findall('\\d+', part))) }) else: print('Invalid part :', part) return", "elif re.match('t\\(\\d{1,3},\\d{1,3},\\d{1,3},\\d+\\)', part): loop.append({ 'type': 't', 'data': list(map(int, re.findall('\\d+', part))) }) else: print('Invalid", "= color.Color(0, 0, 0) current_loop = [] if is_gpio: GPIO.setmode(GPIO.BCM) GPIO.setup(24, GPIO.OUT) GPIO.setup(27,", "GPIO.setup(27, GPIO.OUT) GPIO.setup(10, GPIO.OUT) pwm_r = GPIO.PWM(24, 100) pwm_g = GPIO.PWM(27, 100) pwm_b", "module.connected: break match part['type']: case 'c': set_color(color.Color(part['data'][0], part['data'][1], part['data'][2])) case 'w': time.sleep(part['data'][0] /", "or not module.connected: break now = time.time() * 1000 mix = abs(((next -", "re.findall('\\d+', part))) }) else: print('Invalid part :', part) return loop def loop(): while", "match part['type']: case 'c': set_color(color.Color(part['data'][0], part['data'][1], part['data'][2])) case 'w': time.sleep(part['data'][0] / 1000) case", "pwm_b.ChangeDutyCycle(color.b * (100 / 255)) module.connect() module.wait() if is_gpio: pwm_r.stop() pwm_g.stop() pwm_b.stop() GPIO.cleanup()", "* 1000 mix = abs(((next - now) / part['data'][3]) - 1) set_color(start_color.mix(end_color, mix))", "abs(((next - now) / 
part['data'][3]) - 1) set_color(start_color.mix(end_color, mix)) def set_color(color): current_color.set_color(color) #", "- 1) set_color(start_color.mix(end_color, mix)) def set_color(color): current_color.set_color(color) # print(color.to_array()) if is_gpio: pwm_r.ChangeDutyCycle(color.r *", "received :', current_loop) loop() def load_loop(loop_data): loop = [] for part in loop_data.split('|'):", "1) set_color(start_color.mix(end_color, mix)) def set_color(color): current_color.set_color(color) # print(color.to_array()) if is_gpio: pwm_r.ChangeDutyCycle(color.r * (100", "color.Color(data['red'], data['green'], data['blue']) print(' > Event \"color\" received :', c.to_array()) set_color(c) @module.listening('loop') def", "is_gpio = True except Exception: is_gpio = False print('Running module without GPIO') module", "= [] if is_gpio: GPIO.setmode(GPIO.BCM) GPIO.setup(24, GPIO.OUT) GPIO.setup(27, GPIO.OUT) GPIO.setup(10, GPIO.OUT) pwm_r =", "100) pwm_g = GPIO.PWM(27, 100) pwm_b = GPIO.PWM(10, 100) pwm_r.start(0) pwm_g.start(0) pwm_b.start(0) @module.listening('color')", "case 't': now = time.time() * 1000 next = now + part['data'][3] start_color", "= [] if 'loop' in data: current_loop = load_loop(data['loop']) print(' > Event \"loop\"", "module.connected: for part in current_loop: if not current_loop or not module.connected: break match", "part['data'][3] start_color = current_color.copy() end_color = color.Color(part['data'][0], part['data'][1], part['data'][2]) while now < next:", "/ 255)) pwm_g.ChangeDutyCycle(color.g * (100 / 255)) pwm_b.ChangeDutyCycle(color.b * (100 / 255)) module.connect()", "case 'w': time.sleep(part['data'][0] / 1000) case 't': now = time.time() * 1000 next", "in data: current_loop = load_loop(data['loop']) print(' > Event \"loop\" received :', current_loop) loop()", "ghc.Module(project.ModuleType.LED_STRIP, project.Paths.API_URL.value, project.Paths.WEBSOCKET_URL.value) current_color = color.Color(0, 0, 0) current_loop = [] if is_gpio:", 
"<reponame>la-guirlande/modules import re import time from modules.utils import ghc, color, project try: import", "GPIO.PWM(10, 100) pwm_r.start(0) pwm_g.start(0) pwm_b.start(0) @module.listening('color') def color_listener(data): global current_loop current_loop = []", "pwm_g = GPIO.PWM(27, 100) pwm_b = GPIO.PWM(10, 100) pwm_r.start(0) pwm_g.start(0) pwm_b.start(0) @module.listening('color') def", "set_color(color): current_color.set_color(color) # print(color.to_array()) if is_gpio: pwm_r.ChangeDutyCycle(color.r * (100 / 255)) pwm_g.ChangeDutyCycle(color.g *", "now + part['data'][3] start_color = current_color.copy() end_color = color.Color(part['data'][0], part['data'][1], part['data'][2]) while now", "color, project try: import RPi.GPIO as GPIO is_gpio = True except Exception: is_gpio", "module = ghc.Module(project.ModuleType.LED_STRIP, project.Paths.API_URL.value, project.Paths.WEBSOCKET_URL.value) current_color = color.Color(0, 0, 0) current_loop = []", "100) pwm_b = GPIO.PWM(10, 100) pwm_r.start(0) pwm_g.start(0) pwm_b.start(0) @module.listening('color') def color_listener(data): global current_loop", "now < next: if not current_loop or not module.connected: break now = time.time()", "[] c = color.Color(data['red'], data['green'], data['blue']) print(' > Event \"color\" received :', c.to_array())", "current_loop current_loop = [] if 'loop' in data: current_loop = load_loop(data['loop']) print(' >", "part['data'][3]) - 1) set_color(start_color.mix(end_color, mix)) def set_color(color): current_color.set_color(color) # print(color.to_array()) if is_gpio: pwm_r.ChangeDutyCycle(color.r", "data: current_loop = load_loop(data['loop']) print(' > Event \"loop\" received :', current_loop) loop() def", "print(color.to_array()) if is_gpio: pwm_r.ChangeDutyCycle(color.r * (100 / 255)) pwm_g.ChangeDutyCycle(color.g * (100 / 255))", "global current_loop current_loop = [] c = color.Color(data['red'], data['green'], data['blue']) print(' > Event", "= True except Exception: 
is_gpio = False print('Running module without GPIO') module =", "is_gpio = False print('Running module without GPIO') module = ghc.Module(project.ModuleType.LED_STRIP, project.Paths.API_URL.value, project.Paths.WEBSOCKET_URL.value) current_color", "part))) }) elif re.match('w\\(\\d+\\)', part): loop.append({ 'type': 'w', 'data': list(map(int, re.findall('\\d+', part))) })", "list(map(int, re.findall('\\d+', part))) }) elif re.match('w\\(\\d+\\)', part): loop.append({ 'type': 'w', 'data': list(map(int, re.findall('\\d+',", "= ghc.Module(project.ModuleType.LED_STRIP, project.Paths.API_URL.value, project.Paths.WEBSOCKET_URL.value) current_color = color.Color(0, 0, 0) current_loop = [] if", "part): loop.append({ 'type': 'c', 'data': list(map(int, re.findall('\\d+', part))) }) elif re.match('w\\(\\d+\\)', part): loop.append({", "if 'loop' in data: current_loop = load_loop(data['loop']) print(' > Event \"loop\" received :',", "time.time() * 1000 next = now + part['data'][3] start_color = current_color.copy() end_color =", "(100 / 255)) pwm_b.ChangeDutyCycle(color.b * (100 / 255)) module.connect() module.wait() if is_gpio: pwm_r.stop()", "mix = abs(((next - now) / part['data'][3]) - 1) set_color(start_color.mix(end_color, mix)) def set_color(color):", "\"loop\" received :', current_loop) loop() def load_loop(loop_data): loop = [] for part in", "re.match('w\\(\\d+\\)', part): loop.append({ 'type': 'w', 'data': list(map(int, re.findall('\\d+', part))) }) elif re.match('t\\(\\d{1,3},\\d{1,3},\\d{1,3},\\d+\\)', part):", "without GPIO') module = ghc.Module(project.ModuleType.LED_STRIP, project.Paths.API_URL.value, project.Paths.WEBSOCKET_URL.value) current_color = color.Color(0, 0, 0) current_loop", "not module.connected: break match part['type']: case 'c': set_color(color.Color(part['data'][0], part['data'][1], part['data'][2])) case 'w': time.sleep(part['data'][0]", "not current_loop or not module.connected: break now = time.time() * 1000 mix =", "mix)) def set_color(color): 
current_color.set_color(color) # print(color.to_array()) if is_gpio: pwm_r.ChangeDutyCycle(color.r * (100 / 255))", "import time from modules.utils import ghc, color, project try: import RPi.GPIO as GPIO", "part :', part) return loop def loop(): while current_loop and module.connected: for part", "0, 0) current_loop = [] if is_gpio: GPIO.setmode(GPIO.BCM) GPIO.setup(24, GPIO.OUT) GPIO.setup(27, GPIO.OUT) GPIO.setup(10,", "color.Color(0, 0, 0) current_loop = [] if is_gpio: GPIO.setmode(GPIO.BCM) GPIO.setup(24, GPIO.OUT) GPIO.setup(27, GPIO.OUT)", "re.match('t\\(\\d{1,3},\\d{1,3},\\d{1,3},\\d+\\)', part): loop.append({ 'type': 't', 'data': list(map(int, re.findall('\\d+', part))) }) else: print('Invalid part", "part in loop_data.split('|'): if re.match('c\\(\\d{1,3},\\d{1,3},\\d{1,3}\\)', part): loop.append({ 'type': 'c', 'data': list(map(int, re.findall('\\d+', part)))", "pwm_r.ChangeDutyCycle(color.r * (100 / 255)) pwm_g.ChangeDutyCycle(color.g * (100 / 255)) pwm_b.ChangeDutyCycle(color.b * (100", "part): loop.append({ 'type': 'w', 'data': list(map(int, re.findall('\\d+', part))) }) elif re.match('t\\(\\d{1,3},\\d{1,3},\\d{1,3},\\d+\\)', part): loop.append({", "'c': set_color(color.Color(part['data'][0], part['data'][1], part['data'][2])) case 'w': time.sleep(part['data'][0] / 1000) case 't': now =", "data['blue']) print(' > Event \"color\" received :', c.to_array()) set_color(c) @module.listening('loop') def loop_listener(data): global", "is_gpio: GPIO.setmode(GPIO.BCM) GPIO.setup(24, GPIO.OUT) GPIO.setup(27, GPIO.OUT) GPIO.setup(10, GPIO.OUT) pwm_r = GPIO.PWM(24, 100) pwm_g", "global current_loop current_loop = [] if 'loop' in data: current_loop = load_loop(data['loop']) print('", "while current_loop and module.connected: for part in current_loop: if not current_loop or not", "part['data'][1], part['data'][2]) while now < next: if not current_loop or not module.connected: break", "part) return loop def loop(): while current_loop and module.connected: for part in 
current_loop:", "except Exception: is_gpio = False print('Running module without GPIO') module = ghc.Module(project.ModuleType.LED_STRIP, project.Paths.API_URL.value,", "color_listener(data): global current_loop current_loop = [] c = color.Color(data['red'], data['green'], data['blue']) print(' >", "pwm_r.start(0) pwm_g.start(0) pwm_b.start(0) @module.listening('color') def color_listener(data): global current_loop current_loop = [] c =", "GPIO') module = ghc.Module(project.ModuleType.LED_STRIP, project.Paths.API_URL.value, project.Paths.WEBSOCKET_URL.value) current_color = color.Color(0, 0, 0) current_loop =", "for part in loop_data.split('|'): if re.match('c\\(\\d{1,3},\\d{1,3},\\d{1,3}\\)', part): loop.append({ 'type': 'c', 'data': list(map(int, re.findall('\\d+',", "print('Invalid part :', part) return loop def loop(): while current_loop and module.connected: for", "time from modules.utils import ghc, color, project try: import RPi.GPIO as GPIO is_gpio", "module.connected: break now = time.time() * 1000 mix = abs(((next - now) /", "= time.time() * 1000 mix = abs(((next - now) / part['data'][3]) - 1)", "from modules.utils import ghc, color, project try: import RPi.GPIO as GPIO is_gpio =", "@module.listening('loop') def loop_listener(data): global current_loop current_loop = [] if 'loop' in data: current_loop", "project.Paths.WEBSOCKET_URL.value) current_color = color.Color(0, 0, 0) current_loop = [] if is_gpio: GPIO.setmode(GPIO.BCM) GPIO.setup(24,", "set_color(c) @module.listening('loop') def loop_listener(data): global current_loop current_loop = [] if 'loop' in data:", "'data': list(map(int, re.findall('\\d+', part))) }) elif re.match('w\\(\\d+\\)', part): loop.append({ 'type': 'w', 'data': list(map(int,", "1000 next = now + part['data'][3] start_color = current_color.copy() end_color = color.Color(part['data'][0], part['data'][1],", "}) elif re.match('t\\(\\d{1,3},\\d{1,3},\\d{1,3},\\d+\\)', part): loop.append({ 'type': 't', 'data': list(map(int, 
re.findall('\\d+', part))) }) else:", "def loop_listener(data): global current_loop current_loop = [] if 'loop' in data: current_loop =", "return loop def loop(): while current_loop and module.connected: for part in current_loop: if", "1000) case 't': now = time.time() * 1000 next = now + part['data'][3]", "part))) }) elif re.match('t\\(\\d{1,3},\\d{1,3},\\d{1,3},\\d+\\)', part): loop.append({ 'type': 't', 'data': list(map(int, re.findall('\\d+', part))) })", "if re.match('c\\(\\d{1,3},\\d{1,3},\\d{1,3}\\)', part): loop.append({ 'type': 'c', 'data': list(map(int, re.findall('\\d+', part))) }) elif re.match('w\\(\\d+\\)',", "now) / part['data'][3]) - 1) set_color(start_color.mix(end_color, mix)) def set_color(color): current_color.set_color(color) # print(color.to_array()) if", "GPIO.PWM(27, 100) pwm_b = GPIO.PWM(10, 100) pwm_r.start(0) pwm_g.start(0) pwm_b.start(0) @module.listening('color') def color_listener(data): global", "while now < next: if not current_loop or not module.connected: break now =", "loop(): while current_loop and module.connected: for part in current_loop: if not current_loop or", "color.Color(part['data'][0], part['data'][1], part['data'][2]) while now < next: if not current_loop or not module.connected:", "loop = [] for part in loop_data.split('|'): if re.match('c\\(\\d{1,3},\\d{1,3},\\d{1,3}\\)', part): loop.append({ 'type': 'c',", "= [] for part in loop_data.split('|'): if re.match('c\\(\\d{1,3},\\d{1,3},\\d{1,3}\\)', part): loop.append({ 'type': 'c', 'data':", "or not module.connected: break match part['type']: case 'c': set_color(color.Color(part['data'][0], part['data'][1], part['data'][2])) case 'w':", "current_loop = load_loop(data['loop']) print(' > Event \"loop\" received :', current_loop) loop() def load_loop(loop_data):", "import re import time from modules.utils import ghc, color, project try: import RPi.GPIO", "/ 255)) pwm_b.ChangeDutyCycle(color.b * (100 / 255)) module.connect() module.wait() if is_gpio: pwm_r.stop() 
pwm_g.stop()", "now = time.time() * 1000 next = now + part['data'][3] start_color = current_color.copy()", "100) pwm_r.start(0) pwm_g.start(0) pwm_b.start(0) @module.listening('color') def color_listener(data): global current_loop current_loop = [] c", "received :', c.to_array()) set_color(c) @module.listening('loop') def loop_listener(data): global current_loop current_loop = [] if", "(100 / 255)) pwm_g.ChangeDutyCycle(color.g * (100 / 255)) pwm_b.ChangeDutyCycle(color.b * (100 / 255))", "GPIO.OUT) GPIO.setup(27, GPIO.OUT) GPIO.setup(10, GPIO.OUT) pwm_r = GPIO.PWM(24, 100) pwm_g = GPIO.PWM(27, 100)", "list(map(int, re.findall('\\d+', part))) }) elif re.match('t\\(\\d{1,3},\\d{1,3},\\d{1,3},\\d+\\)', part): loop.append({ 'type': 't', 'data': list(map(int, re.findall('\\d+',", "part))) }) else: print('Invalid part :', part) return loop def loop(): while current_loop", "current_loop = [] if 'loop' in data: current_loop = load_loop(data['loop']) print(' > Event", "> Event \"color\" received :', c.to_array()) set_color(c) @module.listening('loop') def loop_listener(data): global current_loop current_loop", "case 'c': set_color(color.Color(part['data'][0], part['data'][1], part['data'][2])) case 'w': time.sleep(part['data'][0] / 1000) case 't': now", "1000 mix = abs(((next - now) / part['data'][3]) - 1) set_color(start_color.mix(end_color, mix)) def", "current_loop: if not current_loop or not module.connected: break match part['type']: case 'c': set_color(color.Color(part['data'][0],", "True except Exception: is_gpio = False print('Running module without GPIO') module = ghc.Module(project.ModuleType.LED_STRIP,", "pwm_g.start(0) pwm_b.start(0) @module.listening('color') def color_listener(data): global current_loop current_loop = [] c = color.Color(data['red'],", "GPIO.setup(24, GPIO.OUT) GPIO.setup(27, GPIO.OUT) GPIO.setup(10, GPIO.OUT) pwm_r = GPIO.PWM(24, 100) pwm_g = GPIO.PWM(27,", "project.Paths.API_URL.value, project.Paths.WEBSOCKET_URL.value) current_color = 
color.Color(0, 0, 0) current_loop = [] if is_gpio: GPIO.setmode(GPIO.BCM)", "/ part['data'][3]) - 1) set_color(start_color.mix(end_color, mix)) def set_color(color): current_color.set_color(color) # print(color.to_array()) if is_gpio:", "GPIO.OUT) pwm_r = GPIO.PWM(24, 100) pwm_g = GPIO.PWM(27, 100) pwm_b = GPIO.PWM(10, 100)", "if not current_loop or not module.connected: break now = time.time() * 1000 mix", "< next: if not current_loop or not module.connected: break now = time.time() *", "current_color.set_color(color) # print(color.to_array()) if is_gpio: pwm_r.ChangeDutyCycle(color.r * (100 / 255)) pwm_g.ChangeDutyCycle(color.g * (100", "modules.utils import ghc, color, project try: import RPi.GPIO as GPIO is_gpio = True", "* (100 / 255)) pwm_g.ChangeDutyCycle(color.g * (100 / 255)) pwm_b.ChangeDutyCycle(color.b * (100 /", "set_color(start_color.mix(end_color, mix)) def set_color(color): current_color.set_color(color) # print(color.to_array()) if is_gpio: pwm_r.ChangeDutyCycle(color.r * (100 /", "def loop(): while current_loop and module.connected: for part in current_loop: if not current_loop", "c.to_array()) set_color(c) @module.listening('loop') def loop_listener(data): global current_loop current_loop = [] if 'loop' in", "load_loop(data['loop']) print(' > Event \"loop\" received :', current_loop) loop() def load_loop(loop_data): loop =", "= False print('Running module without GPIO') module = ghc.Module(project.ModuleType.LED_STRIP, project.Paths.API_URL.value, project.Paths.WEBSOCKET_URL.value) current_color =", "current_loop = [] c = color.Color(data['red'], data['green'], data['blue']) print(' > Event \"color\" received", "pwm_b.start(0) @module.listening('color') def color_listener(data): global current_loop current_loop = [] c = color.Color(data['red'], data['green'],", "> Event \"loop\" received :', current_loop) loop() def load_loop(loop_data): loop = [] for", "\"color\" received :', c.to_array()) set_color(c) @module.listening('loop') def 
loop_listener(data): global current_loop current_loop = []", "= [] c = color.Color(data['red'], data['green'], data['blue']) print(' > Event \"color\" received :',", "loop def loop(): while current_loop and module.connected: for part in current_loop: if not", "for part in current_loop: if not current_loop or not module.connected: break match part['type']:", "* 1000 next = now + part['data'][3] start_color = current_color.copy() end_color = color.Color(part['data'][0],", "GPIO is_gpio = True except Exception: is_gpio = False print('Running module without GPIO')", "try: import RPi.GPIO as GPIO is_gpio = True except Exception: is_gpio = False", "/ 1000) case 't': now = time.time() * 1000 next = now +", "end_color = color.Color(part['data'][0], part['data'][1], part['data'][2]) while now < next: if not current_loop or", "pwm_b = GPIO.PWM(10, 100) pwm_r.start(0) pwm_g.start(0) pwm_b.start(0) @module.listening('color') def color_listener(data): global current_loop current_loop", "current_loop) loop() def load_loop(loop_data): loop = [] for part in loop_data.split('|'): if re.match('c\\(\\d{1,3},\\d{1,3},\\d{1,3}\\)',", "= GPIO.PWM(10, 100) pwm_r.start(0) pwm_g.start(0) pwm_b.start(0) @module.listening('color') def color_listener(data): global current_loop current_loop =", "loop_listener(data): global current_loop current_loop = [] if 'loop' in data: current_loop = load_loop(data['loop'])", "elif re.match('w\\(\\d+\\)', part): loop.append({ 'type': 'w', 'data': list(map(int, re.findall('\\d+', part))) }) elif re.match('t\\(\\d{1,3},\\d{1,3},\\d{1,3},\\d+\\)',", "loop.append({ 'type': 't', 'data': list(map(int, re.findall('\\d+', part))) }) else: print('Invalid part :', part)", "next: if not current_loop or not module.connected: break now = time.time() * 1000" ]
[ "from fastapi import APIRouter router = APIRouter() @router.get(\"/test\") async def tester(): \"\"\" test", "example plugin to extend a /test route \"\"\" from fastapi import APIRouter router", "= APIRouter() @router.get(\"/test\") async def tester(): \"\"\" test route \"\"\" return [{\"result\": \"test\"}]", "route \"\"\" from fastapi import APIRouter router = APIRouter() @router.get(\"/test\") async def tester():", "to extend a /test route \"\"\" from fastapi import APIRouter router = APIRouter()", "fastapi import APIRouter router = APIRouter() @router.get(\"/test\") async def tester(): \"\"\" test route", "extend a /test route \"\"\" from fastapi import APIRouter router = APIRouter() @router.get(\"/test\")", "APIRouter router = APIRouter() @router.get(\"/test\") async def tester(): \"\"\" test route \"\"\" return", "/test route \"\"\" from fastapi import APIRouter router = APIRouter() @router.get(\"/test\") async def", "a /test route \"\"\" from fastapi import APIRouter router = APIRouter() @router.get(\"/test\") async", "\"\"\" from fastapi import APIRouter router = APIRouter() @router.get(\"/test\") async def tester(): \"\"\"", "router = APIRouter() @router.get(\"/test\") async def tester(): \"\"\" test route \"\"\" return [{\"result\":", "\"\"\" example plugin to extend a /test route \"\"\" from fastapi import APIRouter", "import APIRouter router = APIRouter() @router.get(\"/test\") async def tester(): \"\"\" test route \"\"\"", "plugin to extend a /test route \"\"\" from fastapi import APIRouter router =" ]
[ "abc import ABC, abstractmethod class PreConstraint(ABC): @abstractmethod def filter(self, ids, workload): raise NotImplemented()", "from abc import ABC, abstractmethod class PreConstraint(ABC): @abstractmethod def filter(self, ids, workload): raise" ]
[ "WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND", "AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL", "prior written permission. # # THIS SOFTWARE IS PROVIDED BY dushin.net ``AS IS''", "not options.root: parser.print_help() return 1 config_file = os.sep.join([options.root, \"etc\", \"config.py\"]) if not os.path.isfile(config_file)", "IP info\") return json.loads(response['body'])['ip'] def xml_tree_to_dict(element) : ret = {} for child in", "updates = maybe_update_ip(config, load_updates(dat), get_current_ip(), options.force) write_updates(dat, updates) return 0 except Exception as", "without # modification, are permitted provided that the following conditions are met: #", "client.get(\"/json\") if response['status'] != 200 : raise Exception(\"Unable to retrieve IP info\") return", "All rights reserved. # # Redistribution and use in source and binary forms,", "software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY", "ip, force) : client = http_client.HttpClient(\"dynamicdns.park-your-domain.com\", secure=True) for host in config.hosts : if", "xml_string_to_dict(response['body']) if data['ErrCount'] != '0' or data['IP'] != ip or data['Done'] != 'true'", "ip or data['Done'] != 'true' : raise Exception(\"Error encountered updating ip %s: %s\"", "data = xml_string_to_dict(response['body']) if data['ErrCount'] != '0' or data['IP'] != ip or data['Done']", ": client = http_client.HttpClient(\"ipinfo.io\", secure=True) response = client.get(\"/json\") if response['status'] != 200 :", "config def load_updates(dat) : if not os.path.exists(dat) : return {} with open(dat) as", "THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#", "* Redistributions of source code must retain the above copyright # notice, this", "OptionParser parser = OptionParser() parser.add_option( \"--root\", dest=\"root\", help=\"Root directory\", type=\"string\", ) parser.add_option( \"--force\",", "HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT", "file found: %s\" % config_file) parser.print_help() return 1 config = load_config(config_file) var_dir =", "# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # import", "are permitted provided that the following conditions are met: # * Redistributions of", "of conditions and the following disclaimer. # * Redistributions in binary form must", "distribution. # * Neither the name of dushin.net nor the # names of", "conditions and the following disclaimer in the # documentation and/or other materials provided", "load_updates(dat), get_current_ip(), options.force) write_updates(dat, updates) return 0 except Exception as e: import logging", "response['status'] != 200 : raise Exception(\"Did not receive 200 on update IP info\")", "ip)) updates[host] = ip return updates def main(argv) : parser = create_parser() (options,", "POSSIBILITY OF SUCH DAMAGE. # import sys import os import json import xml.etree.ElementTree", "# # THIS SOFTWARE IS PROVIDED BY dushin.net ``AS IS'' AND ANY #", "options.root: parser.print_help() return 1 config_file = os.sep.join([options.root, \"etc\", \"config.py\"]) if not os.path.isfile(config_file) :", "THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE", "PURPOSE ARE # DISCLAIMED. 
IN NO EVENT SHALL dushin.net BE LIABLE FOR ANY", "OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR", "action=\"store_true\", help=\"Force an update\", ) return parser def load_config(_config_file) : import config return", "SOFTWARE IS PROVIDED BY dushin.net ``AS IS'' AND ANY # EXPRESS OR IMPLIED", "def write_updates(dat, updates) : with open(dat, 'w') as f : f.write(json.dumps(updates)) def get_current_ip()", "ip %s\" % (host, config.domain, ip)) updates[host] = ip return updates def main(argv)", "BY dushin.net ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT", "notice, this list of conditions and the following disclaimer in the # documentation", "met: # * Redistributions of source code must retain the above copyright #", "this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED", "host in config.hosts : if not force and host in updates and updates[host]", "updating ip %s: %s\" % (ip, data)) config.logger.warn(\"Updated host %s in domain %s", "client = http_client.HttpClient(\"ipinfo.io\", secure=True) response = client.get(\"/json\") if response['status'] != 200 : raise", "this list of conditions and the following disclaimer. # * Redistributions in binary", "the following conditions are met: # * Redistributions of source code must retain", "load_config(config_file) var_dir = os.sep.join([options.root, \"var\"]) if not os.path.exists(var_dir) : os.mkdir(var_dir) dat = os.sep.join([var_dir,", "with or without # modification, are permitted provided that the following conditions are", "Skipping.\" % (host, ip)) continue params = { 'host': host, 'domain': config.domain, 'password':", "def main(argv) : parser = create_parser() (options, args) = parser.parse_args() try: if not", "conditions and the following disclaimer. 
# * Redistributions in binary form must reproduce", "if not os.path.exists(var_dir) : os.mkdir(var_dir) dat = os.sep.join([var_dir, \"update.dat\"]) updates = maybe_update_ip(config, load_updates(dat),", "``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED", "= { 'host': host, 'domain': config.domain, 'password': <PASSWORD> } response = client.get(\"/update\", params=params)", "f : return json.loads(f.read()) def write_updates(dat, updates) : with open(dat, 'w') as f", "or data['IP'] != ip or data['Done'] != 'true' : raise Exception(\"Error encountered updating", "ret[child.tag] = child.text return ret def xml_string_to_dict(text) : return xml_tree_to_dict(ElementTree.fromstring(text)) def maybe_update_ip(config, updates,", ": parser = create_parser() (options, args) = parser.parse_args() try: if not options.root: parser.print_help()", "print(\"No configuration file found: %s\" % config_file) parser.print_help() return 1 config = load_config(config_file)", "or data['Done'] != 'true' : raise Exception(\"Error encountered updating ip %s: %s\" %", "following disclaimer. # * Redistributions in binary form must reproduce the above copyright", "OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE,", "# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND", "= maybe_update_ip(config, load_updates(dat), get_current_ip(), options.force) write_updates(dat, updates) return 0 except Exception as e:", "OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER", "LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR", "# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #", "USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY", "# * Neither the name of dushin.net nor the # names of its", "above copyright # notice, this list of conditions and the following disclaimer. #", "(c) dushin.net # All rights reserved. 
# # Redistribution and use in source", "list of conditions and the following disclaimer. # * Redistributions in binary form", "rights reserved. # # Redistribution and use in source and binary forms, with", "LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA,", "e: import logging logging.error(e) import traceback traceback.print_exc() return -1 if __name__ == \"__main__\"", "parser = create_parser() (options, args) = parser.parse_args() try: if not options.root: parser.print_help() return", "# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT", "} response = client.get(\"/update\", params=params) if response['status'] != 200 : raise Exception(\"Did not", "raise Exception(\"Did not receive 200 on update IP info\") data = xml_string_to_dict(response['body']) if", "data)) config.logger.warn(\"Updated host %s in domain %s with ip %s\" % (host, config.domain,", "Redistributions in binary form must reproduce the above copyright # notice, this list", "dushin.net BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL", "Exception(\"Unable to retrieve IP info\") return json.loads(response['body'])['ip'] def xml_tree_to_dict(element) : ret = {}", "import sys import os import json import xml.etree.ElementTree as ElementTree import http_client def", "contributors may be used to endorse or promote products # derived from this", "the # names of its contributors may be used to endorse or promote", "its contributors may be used to endorse or promote products # derived from", "\"var\"]) if not os.path.exists(var_dir) : os.mkdir(var_dir) dat = os.sep.join([var_dir, \"update.dat\"]) updates = maybe_update_ip(config,", "LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE)", ": raise Exception(\"Unable to retrieve IP info\") return json.loads(response['body'])['ip'] def xml_tree_to_dict(element) : ret", "not force and host in updates and 
updates[host] == ip : config.logger.info(\"Host %s", "THE POSSIBILITY OF SUCH DAMAGE. # import sys import os import json import", "nor the # names of its contributors may be used to endorse or", "with open(dat, 'w') as f : f.write(json.dumps(updates)) def get_current_ip() : client = http_client.HttpClient(\"ipinfo.io\",", "if not options.root: parser.print_help() return 1 config_file = os.sep.join([options.root, \"etc\", \"config.py\"]) if not", "OF THE POSSIBILITY OF SUCH DAMAGE. # import sys import os import json", "!= 200 : raise Exception(\"Did not receive 200 on update IP info\") data", "return config def load_updates(dat) : if not os.path.exists(dat) : return {} with open(dat)", "params = { 'host': host, 'domain': config.domain, 'password': <PASSWORD> } response = client.get(\"/update\",", "in domain %s with ip %s\" % (host, config.domain, ip)) updates[host] = ip", "force) : client = http_client.HttpClient(\"dynamicdns.park-your-domain.com\", secure=True) for host in config.hosts : if not", "<PASSWORD> } response = client.get(\"/update\", params=params) if response['status'] != 200 : raise Exception(\"Did", ": with open(dat, 'w') as f : f.write(json.dumps(updates)) def get_current_ip() : client =", "* Redistributions in binary form must reproduce the above copyright # notice, this", "or without # modification, are permitted provided that the following conditions are met:", "% (host, ip)) continue params = { 'host': host, 'domain': config.domain, 'password': <PASSWORD>", "get_current_ip(), options.force) write_updates(dat, updates) return 0 except Exception as e: import logging logging.error(e)", "documentation and/or other materials provided with the distribution. # * Neither the name", "conditions are met: # * Redistributions of source code must retain the above", "response['status'] != 200 : raise Exception(\"Unable to retrieve IP info\") return json.loads(response['body'])['ip'] def", "and updates[host] == ip : config.logger.info(\"Host %s has ip %s. 
Skipping.\" % (host,", "OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE", "(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS", "code must retain the above copyright # notice, this list of conditions and", "# * Redistributions in binary form must reproduce the above copyright # notice,", "# notice, this list of conditions and the following disclaimer in the #", "http_client def create_parser(): from optparse import OptionParser parser = OptionParser() parser.add_option( \"--root\", dest=\"root\",", "# modification, are permitted provided that the following conditions are met: # *", "encountered updating ip %s: %s\" % (ip, data)) config.logger.warn(\"Updated host %s in domain", "'host': host, 'domain': config.domain, 'password': <PASSWORD> } response = client.get(\"/update\", params=params) if response['status']", "as e: import logging logging.error(e) import traceback traceback.print_exc() return -1 if __name__ ==", "if response['status'] != 200 : raise Exception(\"Unable to retrieve IP info\") return json.loads(response['body'])['ip']", "return 1 config_file = os.sep.join([options.root, \"etc\", \"config.py\"]) if not os.path.isfile(config_file) : print(\"No configuration", "AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE", "OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR", "# Copyright (c) dushin.net # All rights reserved. 
# # Redistribution and use", "0 except Exception as e: import logging logging.error(e) import traceback traceback.print_exc() return -1", "= os.sep.join([var_dir, \"update.dat\"]) updates = maybe_update_ip(config, load_updates(dat), get_current_ip(), options.force) write_updates(dat, updates) return 0", "IN NO EVENT SHALL dushin.net BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL,", "'true' : raise Exception(\"Error encountered updating ip %s: %s\" % (ip, data)) config.logger.warn(\"Updated", "parser.add_option( \"--force\", dest=\"force\", action=\"store_true\", help=\"Force an update\", ) return parser def load_config(_config_file) :", "LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #", "os.path.isfile(config_file) : print(\"No configuration file found: %s\" % config_file) parser.print_help() return 1 config", "BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN", "ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #", "and use in source and binary forms, with or without # modification, are", "= child.text return ret def xml_string_to_dict(text) : return xml_tree_to_dict(ElementTree.fromstring(text)) def maybe_update_ip(config, updates, ip,", "FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. 
IN NO EVENT SHALL dushin.net BE", "var_dir = os.sep.join([options.root, \"var\"]) if not os.path.exists(var_dir) : os.mkdir(var_dir) dat = os.sep.join([var_dir, \"update.dat\"])", "AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR", "ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED", "'w') as f : f.write(json.dumps(updates)) def get_current_ip() : client = http_client.HttpClient(\"ipinfo.io\", secure=True) response", "json.loads(response['body'])['ip'] def xml_tree_to_dict(element) : ret = {} for child in element : ret[child.tag]", ": if not force and host in updates and updates[host] == ip :", "CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY", "INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,", "Copyright (c) dushin.net # All rights reserved. # # Redistribution and use in", "# * Redistributions of source code must retain the above copyright # notice,", "this list of conditions and the following disclaimer in the # documentation and/or", "# names of its contributors may be used to endorse or promote products", "return xml_tree_to_dict(ElementTree.fromstring(text)) def maybe_update_ip(config, updates, ip, force) : client = http_client.HttpClient(\"dynamicdns.park-your-domain.com\", secure=True) for", "%s\" % (host, config.domain, ip)) updates[host] = ip return updates def main(argv) :", "(ip, data)) config.logger.warn(\"Updated host %s in domain %s with ip %s\" % (host,", ": ret[child.tag] = child.text return ret def xml_string_to_dict(text) : return xml_tree_to_dict(ElementTree.fromstring(text)) def maybe_update_ip(config,", "INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS", "1 config = load_config(config_file) var_dir = os.sep.join([options.root, \"var\"]) if not os.path.exists(var_dir) : os.mkdir(var_dir)", "xml.etree.ElementTree as ElementTree import http_client def create_parser(): from 
optparse import OptionParser parser =", "\"--root\", dest=\"root\", help=\"Root directory\", type=\"string\", ) parser.add_option( \"--force\", dest=\"force\", action=\"store_true\", help=\"Force an update\",", "provided with the distribution. # * Neither the name of dushin.net nor the", ": config.logger.info(\"Host %s has ip %s. Skipping.\" % (host, ip)) continue params =", "disclaimer in the # documentation and/or other materials provided with the distribution. #", "on update IP info\") data = xml_string_to_dict(response['body']) if data['ErrCount'] != '0' or data['IP']", "return 0 except Exception as e: import logging logging.error(e) import traceback traceback.print_exc() return", "SHALL dushin.net BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR", "\"etc\", \"config.py\"]) if not os.path.isfile(config_file) : print(\"No configuration file found: %s\" % config_file)", "= http_client.HttpClient(\"dynamicdns.park-your-domain.com\", secure=True) for host in config.hosts : if not force and host", "{} with open(dat) as f : return json.loads(f.read()) def write_updates(dat, updates) : with", "open(dat) as f : return json.loads(f.read()) def write_updates(dat, updates) : with open(dat, 'w')", "data['IP'] != ip or data['Done'] != 'true' : raise Exception(\"Error encountered updating ip", "WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING", "updates[host] = ip return updates def main(argv) : parser = create_parser() (options, args)", "get_current_ip() : client = http_client.HttpClient(\"ipinfo.io\", secure=True) response = client.get(\"/json\") if response['status'] != 200", "(host, ip)) continue params = { 'host': host, 'domain': config.domain, 'password': <PASSWORD> }", "== ip : config.logger.info(\"Host %s has ip %s. 
Skipping.\" % (host, ip)) continue", "open(dat, 'w') as f : f.write(json.dumps(updates)) def get_current_ip() : client = http_client.HttpClient(\"ipinfo.io\", secure=True)", "def load_config(_config_file) : import config return config def load_updates(dat) : if not os.path.exists(dat)", "must retain the above copyright # notice, this list of conditions and the", "with open(dat) as f : return json.loads(f.read()) def write_updates(dat, updates) : with open(dat,", "{ 'host': host, 'domain': config.domain, 'password': <PASSWORD> } response = client.get(\"/update\", params=params) if", "Exception(\"Did not receive 200 on update IP info\") data = xml_string_to_dict(response['body']) if data['ErrCount']", "ip return updates def main(argv) : parser = create_parser() (options, args) = parser.parse_args()", "written permission. # # THIS SOFTWARE IS PROVIDED BY dushin.net ``AS IS'' AND", "THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF", "Redistribution and use in source and binary forms, with or without # modification,", "(options, args) = parser.parse_args() try: if not options.root: parser.print_help() return 1 config_file =", "config.domain, 'password': <PASSWORD> } response = client.get(\"/update\", params=params) if response['status'] != 200 :", "= xml_string_to_dict(response['body']) if data['ErrCount'] != '0' or data['IP'] != ip or data['Done'] !=", "source and binary forms, with or without # modification, are permitted provided that", "domain %s with ip %s\" % (host, config.domain, ip)) updates[host] = ip return", "not receive 200 on update IP info\") data = xml_string_to_dict(response['body']) if data['ErrCount'] !=", "ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED", "= client.get(\"/update\", params=params) if response['status'] != 200 : raise Exception(\"Did not receive 200", "above copyright # notice, this list of conditions and the following disclaimer in", ": client = http_client.HttpClient(\"dynamicdns.park-your-domain.com\", 
secure=True) for host in config.hosts : if not force", "return updates def main(argv) : parser = create_parser() (options, args) = parser.parse_args() try:", "in updates and updates[host] == ip : config.logger.info(\"Host %s has ip %s. Skipping.\"", "response = client.get(\"/json\") if response['status'] != 200 : raise Exception(\"Unable to retrieve IP", "raise Exception(\"Unable to retrieve IP info\") return json.loads(response['body'])['ip'] def xml_tree_to_dict(element) : ret =", "binary form must reproduce the above copyright # notice, this list of conditions", "Redistributions of source code must retain the above copyright # notice, this list", "IS PROVIDED BY dushin.net ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES,", "form must reproduce the above copyright # notice, this list of conditions and", "TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE", "STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY", "if not os.path.isfile(config_file) : print(\"No configuration file found: %s\" % config_file) parser.print_help() return", "notice, this list of conditions and the following disclaimer. 
# * Redistributions in", "BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES", "BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR", "NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A", "ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING", "provided that the following conditions are met: # * Redistributions of source code", "!= 200 : raise Exception(\"Unable to retrieve IP info\") return json.loads(response['body'])['ip'] def xml_tree_to_dict(element)", "main(argv) : parser = create_parser() (options, args) = parser.parse_args() try: if not options.root:", "used to endorse or promote products # derived from this software without specific", "are met: # * Redistributions of source code must retain the above copyright", "# # Copyright (c) dushin.net # All rights reserved. # # Redistribution and", ": f.write(json.dumps(updates)) def get_current_ip() : client = http_client.HttpClient(\"ipinfo.io\", secure=True) response = client.get(\"/json\") if", "PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS;", "of dushin.net nor the # names of its contributors may be used to", "promote products # derived from this software without specific prior written permission. 
#", "the above copyright # notice, this list of conditions and the following disclaimer", "force and host in updates and updates[host] == ip : config.logger.info(\"Host %s has", "info\") data = xml_string_to_dict(response['body']) if data['ErrCount'] != '0' or data['IP'] != ip or", ": raise Exception(\"Error encountered updating ip %s: %s\" % (ip, data)) config.logger.warn(\"Updated host", "ret def xml_string_to_dict(text) : return xml_tree_to_dict(ElementTree.fromstring(text)) def maybe_update_ip(config, updates, ip, force) : client", "json import xml.etree.ElementTree as ElementTree import http_client def create_parser(): from optparse import OptionParser", ": ret = {} for child in element : ret[child.tag] = child.text return", "retrieve IP info\") return json.loads(response['body'])['ip'] def xml_tree_to_dict(element) : ret = {} for child", "EVENT SHALL dushin.net BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,", ": import config return config def load_updates(dat) : if not os.path.exists(dat) : return", "update\", ) return parser def load_config(_config_file) : import config return config def load_updates(dat)", "options.force) write_updates(dat, updates) return 0 except Exception as e: import logging logging.error(e) import", "and the following disclaimer in the # documentation and/or other materials provided with", "OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS #", "OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. 
IN NO", "CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,", "PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY,", "forms, with or without # modification, are permitted provided that the following conditions", "write_updates(dat, updates) return 0 except Exception as e: import logging logging.error(e) import traceback", "TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR", "create_parser() (options, args) = parser.parse_args() try: if not options.root: parser.print_help() return 1 config_file", "source code must retain the above copyright # notice, this list of conditions", "= parser.parse_args() try: if not options.root: parser.print_help() return 1 config_file = os.sep.join([options.root, \"etc\",", "xml_string_to_dict(text) : return xml_tree_to_dict(ElementTree.fromstring(text)) def maybe_update_ip(config, updates, ip, force) : client = http_client.HttpClient(\"dynamicdns.park-your-domain.com\",", "SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF", "import xml.etree.ElementTree as ElementTree import http_client def create_parser(): from optparse import OptionParser parser", "in source and binary forms, with or without # modification, are permitted provided", "EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES", "OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY", ") parser.add_option( \"--force\", dest=\"force\", action=\"store_true\", help=\"Force an update\", ) return parser def load_config(_config_file)", "def maybe_update_ip(config, updates, ip, force) : client = http_client.HttpClient(\"dynamicdns.park-your-domain.com\", secure=True) for host in", "# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED.", "write_updates(dat, updates) : with open(dat, 'w') as f : f.write(json.dumps(updates)) def get_current_ip() :", "IP info\") data = 
xml_string_to_dict(response['body']) if data['ErrCount'] != '0' or data['IP'] != ip", "# # Redistribution and use in source and binary forms, with or without", "'domain': config.domain, 'password': <PASSWORD> } response = client.get(\"/update\", params=params) if response['status'] != 200", "directory\", type=\"string\", ) parser.add_option( \"--force\", dest=\"force\", action=\"store_true\", help=\"Force an update\", ) return parser", "host in updates and updates[host] == ip : config.logger.info(\"Host %s has ip %s.", "as ElementTree import http_client def create_parser(): from optparse import OptionParser parser = OptionParser()", "NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE,", "IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN", "PROVIDED BY dushin.net ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING,", "the distribution. # * Neither the name of dushin.net nor the # names", ": raise Exception(\"Did not receive 200 on update IP info\") data = xml_string_to_dict(response['body'])", "os.path.exists(dat) : return {} with open(dat) as f : return json.loads(f.read()) def write_updates(dat,", "child.text return ret def xml_string_to_dict(text) : return xml_tree_to_dict(ElementTree.fromstring(text)) def maybe_update_ip(config, updates, ip, force)", "maybe_update_ip(config, updates, ip, force) : client = http_client.HttpClient(\"dynamicdns.park-your-domain.com\", secure=True) for host in config.hosts", "import json import xml.etree.ElementTree as ElementTree import http_client def create_parser(): from optparse import", "%s. 
Skipping.\" % (host, ip)) continue params = { 'host': host, 'domain': config.domain,", "= os.sep.join([options.root, \"etc\", \"config.py\"]) if not os.path.isfile(config_file) : print(\"No configuration file found: %s\"", "optparse import OptionParser parser = OptionParser() parser.add_option( \"--root\", dest=\"root\", help=\"Root directory\", type=\"string\", )", "data['ErrCount'] != '0' or data['IP'] != ip or data['Done'] != 'true' : raise", "for child in element : ret[child.tag] = child.text return ret def xml_string_to_dict(text) :", "%s in domain %s with ip %s\" % (host, config.domain, ip)) updates[host] =", "1 config_file = os.sep.join([options.root, \"etc\", \"config.py\"]) if not os.path.isfile(config_file) : print(\"No configuration file", "if not os.path.exists(dat) : return {} with open(dat) as f : return json.loads(f.read())", "!= ip or data['Done'] != 'true' : raise Exception(\"Error encountered updating ip %s:", "# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE", "client.get(\"/update\", params=params) if response['status'] != 200 : raise Exception(\"Did not receive 200 on", "GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)", "%s: %s\" % (ip, data)) config.logger.warn(\"Updated host %s in domain %s with ip", "create_parser(): from optparse import OptionParser parser = OptionParser() parser.add_option( \"--root\", dest=\"root\", help=\"Root directory\",", "to retrieve IP info\") return json.loads(response['body'])['ip'] def xml_tree_to_dict(element) : ret = {} for", "ret = {} for child in element : ret[child.tag] = child.text return ret", "INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO,", "IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY", "return json.loads(f.read()) def write_updates(dat, updates) : with open(dat, 'w') as f : f.write(json.dumps(updates))", "element : ret[child.tag] = child.text 
return ret def xml_string_to_dict(text) : return xml_tree_to_dict(ElementTree.fromstring(text)) def", "parser.print_help() return 1 config_file = os.sep.join([options.root, \"etc\", \"config.py\"]) if not os.path.isfile(config_file) : print(\"No", "dushin.net # All rights reserved. # # Redistribution and use in source and", "products # derived from this software without specific prior written permission. # #", "THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE", "json.loads(f.read()) def write_updates(dat, updates) : with open(dat, 'w') as f : f.write(json.dumps(updates)) def", "try: if not options.root: parser.print_help() return 1 config_file = os.sep.join([options.root, \"etc\", \"config.py\"]) if", "child in element : ret[child.tag] = child.text return ret def xml_string_to_dict(text) : return", ": if not os.path.exists(dat) : return {} with open(dat) as f : return", "PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL dushin.net BE LIABLE FOR", "use in source and binary forms, with or without # modification, are permitted", "endorse or promote products # derived from this software without specific prior written", "import logging logging.error(e) import traceback traceback.print_exc() return -1 if __name__ == \"__main__\" :", "ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT", "in binary form must reproduce the above copyright # notice, this list of", "# documentation and/or other materials provided with the distribution. # * Neither the", "as f : return json.loads(f.read()) def write_updates(dat, updates) : with open(dat, 'w') as", "config = load_config(config_file) var_dir = os.sep.join([options.root, \"var\"]) if not os.path.exists(var_dir) : os.mkdir(var_dir) dat", "an update\", ) return parser def load_config(_config_file) : import config return config def", "# All rights reserved. 
# # Redistribution and use in source and binary", "type=\"string\", ) parser.add_option( \"--force\", dest=\"force\", action=\"store_true\", help=\"Force an update\", ) return parser def", "config.logger.info(\"Host %s has ip %s. Skipping.\" % (host, ip)) continue params = {", "response = client.get(\"/update\", params=params) if response['status'] != 200 : raise Exception(\"Did not receive", "'password': <PASSWORD> } response = client.get(\"/update\", params=params) if response['status'] != 200 : raise", "200 on update IP info\") data = xml_string_to_dict(response['body']) if data['ErrCount'] != '0' or", "import os import json import xml.etree.ElementTree as ElementTree import http_client def create_parser(): from", "if response['status'] != 200 : raise Exception(\"Did not receive 200 on update IP", "= load_config(config_file) var_dir = os.sep.join([options.root, \"var\"]) if not os.path.exists(var_dir) : os.mkdir(var_dir) dat =", "(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF", "return ret def xml_string_to_dict(text) : return xml_tree_to_dict(ElementTree.fromstring(text)) def maybe_update_ip(config, updates, ip, force) :", "names of its contributors may be used to endorse or promote products #", "%s has ip %s. Skipping.\" % (host, ip)) continue params = { 'host':", "secure=True) for host in config.hosts : if not force and host in updates", "client = http_client.HttpClient(\"dynamicdns.park-your-domain.com\", secure=True) for host in config.hosts : if not force and", "OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF", "= http_client.HttpClient(\"ipinfo.io\", secure=True) response = client.get(\"/json\") if response['status'] != 200 : raise Exception(\"Unable", "dest=\"force\", action=\"store_true\", help=\"Force an update\", ) return parser def load_config(_config_file) : import config", "and/or other materials provided with the distribution. 
# * Neither the name of", "data['Done'] != 'true' : raise Exception(\"Error encountered updating ip %s: %s\" % (ip,", "load_updates(dat) : if not os.path.exists(dat) : return {} with open(dat) as f :", "host, 'domain': config.domain, 'password': <PASSWORD> } response = client.get(\"/update\", params=params) if response['status'] !=", "DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;", "and the following disclaimer. # * Redistributions in binary form must reproduce the", "update IP info\") data = xml_string_to_dict(response['body']) if data['ErrCount'] != '0' or data['IP'] !=", "not os.path.exists(dat) : return {} with open(dat) as f : return json.loads(f.read()) def", "% config_file) parser.print_help() return 1 config = load_config(config_file) var_dir = os.sep.join([options.root, \"var\"]) if", "if not force and host in updates and updates[host] == ip : config.logger.info(\"Host", "as f : f.write(json.dumps(updates)) def get_current_ip() : client = http_client.HttpClient(\"ipinfo.io\", secure=True) response =", "= create_parser() (options, args) = parser.parse_args() try: if not options.root: parser.print_help() return 1", "from optparse import OptionParser parser = OptionParser() parser.add_option( \"--root\", dest=\"root\", help=\"Root directory\", type=\"string\",", "(host, config.domain, ip)) updates[host] = ip return updates def main(argv) : parser =", "os.sep.join([options.root, \"var\"]) if not os.path.exists(var_dir) : os.mkdir(var_dir) dat = os.sep.join([var_dir, \"update.dat\"]) updates =", "= os.sep.join([options.root, \"var\"]) if not os.path.exists(var_dir) : os.mkdir(var_dir) dat = os.sep.join([var_dir, \"update.dat\"]) updates", "EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# import sys import", "OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON", "CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR", "parser.add_option( \"--root\", dest=\"root\", help=\"Root directory\", type=\"string\", ) parser.add_option( \"--force\", dest=\"force\", action=\"store_true\", help=\"Force an", "DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY", "ip %s: %s\" % (ip, data)) config.logger.warn(\"Updated host %s in domain %s with", "%s with ip %s\" % (host, config.domain, ip)) updates[host] = ip return updates", "%s\" % (ip, data)) config.logger.warn(\"Updated host %s in domain %s with ip %s\"", "xml_tree_to_dict(ElementTree.fromstring(text)) def maybe_update_ip(config, updates, ip, force) : client = http_client.HttpClient(\"dynamicdns.park-your-domain.com\", secure=True) for host", "DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED", "not os.path.isfile(config_file) : print(\"No configuration file found: %s\" % config_file) parser.print_help() return 1", "http_client.HttpClient(\"ipinfo.io\", secure=True) response = client.get(\"/json\") if response['status'] != 200 : raise Exception(\"Unable to", "OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.", "config return config def load_updates(dat) : if not os.path.exists(dat) : return {} with", ": return json.loads(f.read()) def write_updates(dat, updates) : with open(dat, 'w') as f :", "python # # Copyright (c) dushin.net # All rights reserved. 
# # Redistribution", "reproduce the above copyright # notice, this list of conditions and the following", ": return {} with open(dat) as f : return json.loads(f.read()) def write_updates(dat, updates)", "Neither the name of dushin.net nor the # names of its contributors may", "and binary forms, with or without # modification, are permitted provided that the", "= client.get(\"/json\") if response['status'] != 200 : raise Exception(\"Unable to retrieve IP info\")", "receive 200 on update IP info\") data = xml_string_to_dict(response['body']) if data['ErrCount'] != '0'", "OptionParser() parser.add_option( \"--root\", dest=\"root\", help=\"Root directory\", type=\"string\", ) parser.add_option( \"--force\", dest=\"force\", action=\"store_true\", help=\"Force", "help=\"Force an update\", ) return parser def load_config(_config_file) : import config return config", "modification, are permitted provided that the following conditions are met: # * Redistributions", "help=\"Root directory\", type=\"string\", ) parser.add_option( \"--force\", dest=\"force\", action=\"store_true\", help=\"Force an update\", ) return", "return 1 config = load_config(config_file) var_dir = os.sep.join([options.root, \"var\"]) if not os.path.exists(var_dir) :", "permission. 
# # THIS SOFTWARE IS PROVIDED BY dushin.net ``AS IS'' AND ANY", "sys import os import json import xml.etree.ElementTree as ElementTree import http_client def create_parser():", "in config.hosts : if not force and host in updates and updates[host] ==", "following disclaimer in the # documentation and/or other materials provided with the distribution.", "EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE", "f : f.write(json.dumps(updates)) def get_current_ip() : client = http_client.HttpClient(\"ipinfo.io\", secure=True) response = client.get(\"/json\")", "def xml_string_to_dict(text) : return xml_tree_to_dict(ElementTree.fromstring(text)) def maybe_update_ip(config, updates, ip, force) : client =", "params=params) if response['status'] != 200 : raise Exception(\"Did not receive 200 on update", "parser = OptionParser() parser.add_option( \"--root\", dest=\"root\", help=\"Root directory\", type=\"string\", ) parser.add_option( \"--force\", dest=\"force\",", "updates and updates[host] == ip : config.logger.info(\"Host %s has ip %s. Skipping.\" %", "%s\" % config_file) parser.print_help() return 1 config = load_config(config_file) var_dir = os.sep.join([options.root, \"var\"])", "ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# import sys import os import", "IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #", "import OptionParser parser = OptionParser() parser.add_option( \"--root\", dest=\"root\", help=\"Root directory\", type=\"string\", ) parser.add_option(", "of conditions and the following disclaimer in the # documentation and/or other materials", "of its contributors may be used to endorse or promote products # derived", "OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER", "info\") return json.loads(response['body'])['ip'] def xml_tree_to_dict(element) : ret = {} for child in element", ") return parser def load_config(_config_file) : import config return config def load_updates(dat) :", "\"config.py\"]) if not os.path.isfile(config_file) : print(\"No configuration file found: %s\" % config_file) parser.print_help()", "200 : raise Exception(\"Unable to retrieve IP info\") return json.loads(response['body'])['ip'] def xml_tree_to_dict(element) :", "THIS SOFTWARE IS PROVIDED BY dushin.net ``AS IS'' AND ANY # EXPRESS OR", "def load_updates(dat) : if not os.path.exists(dat) : return {} with open(dat) as f", "parser def load_config(_config_file) : import config return config def load_updates(dat) : if not", "IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,", "not os.path.exists(var_dir) : os.mkdir(var_dir) dat = os.sep.join([var_dir, \"update.dat\"]) updates = maybe_update_ip(config, load_updates(dat), get_current_ip(),", ": return xml_tree_to_dict(ElementTree.fromstring(text)) def maybe_update_ip(config, updates, ip, force) : client = http_client.HttpClient(\"dynamicdns.park-your-domain.com\", secure=True)", "and host in updates and updates[host] == ip : config.logger.info(\"Host %s has ip", "!= '0' or data['IP'] != ip or data['Done'] != 'true' : raise Exception(\"Error", "config.domain, ip)) updates[host] = ip return updates def main(argv) : parser = create_parser()", "'0' or data['IP'] != ip or 
data['Done'] != 'true' : raise Exception(\"Error encountered", "config_file) parser.print_help() return 1 config = load_config(config_file) var_dir = os.sep.join([options.root, \"var\"]) if not", "has ip %s. Skipping.\" % (host, ip)) continue params = { 'host': host,", "NO EVENT SHALL dushin.net BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL,", "config.hosts : if not force and host in updates and updates[host] == ip", "os.sep.join([var_dir, \"update.dat\"]) updates = maybe_update_ip(config, load_updates(dat), get_current_ip(), options.force) write_updates(dat, updates) return 0 except", "os.mkdir(var_dir) dat = os.sep.join([var_dir, \"update.dat\"]) updates = maybe_update_ip(config, load_updates(dat), get_current_ip(), options.force) write_updates(dat, updates)", "= OptionParser() parser.add_option( \"--root\", dest=\"root\", help=\"Root directory\", type=\"string\", ) parser.add_option( \"--force\", dest=\"force\", action=\"store_true\",", "ARE # DISCLAIMED. IN NO EVENT SHALL dushin.net BE LIABLE FOR ANY #", "os.path.exists(var_dir) : os.mkdir(var_dir) dat = os.sep.join([var_dir, \"update.dat\"]) updates = maybe_update_ip(config, load_updates(dat), get_current_ip(), options.force)", "dat = os.sep.join([var_dir, \"update.dat\"]) updates = maybe_update_ip(config, load_updates(dat), get_current_ip(), options.force) write_updates(dat, updates) return", ": os.mkdir(var_dir) dat = os.sep.join([var_dir, \"update.dat\"]) updates = maybe_update_ip(config, load_updates(dat), get_current_ip(), options.force) write_updates(dat,", "OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF", "os.sep.join([options.root, \"etc\", \"config.py\"]) if not os.path.isfile(config_file) : print(\"No configuration file found: %s\" %", "secure=True) response = client.get(\"/json\") if response['status'] != 200 : raise Exception(\"Unable to retrieve", "IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF", "FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING,", "SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED", "#!/usr/bin/env python # # Copyright (c) dushin.net # All rights reserved. # #", "WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF", "return {} with open(dat) as f : return json.loads(f.read()) def write_updates(dat, updates) :", "os import json import xml.etree.ElementTree as ElementTree import http_client def create_parser(): from optparse", "other materials provided with the distribution. # * Neither the name of dushin.net", "the following disclaimer in the # documentation and/or other materials provided with the", "!= 'true' : raise Exception(\"Error encountered updating ip %s: %s\" % (ip, data))", "must reproduce the above copyright # notice, this list of conditions and the", "dushin.net ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT", "return json.loads(response['body'])['ip'] def xml_tree_to_dict(element) : ret = {} for child in element :", "SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS", "raise Exception(\"Error encountered updating ip %s: %s\" % (ip, data)) config.logger.warn(\"Updated host %s", "{} for child in element : ret[child.tag] = child.text return ret def xml_string_to_dict(text)", "the following disclaimer. # * Redistributions in binary form must reproduce the above", "LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND #", "name of dushin.net nor the # names of its contributors may be used", "import config return config def load_updates(dat) : if not os.path.exists(dat) : return {}", "import http_client def create_parser(): from optparse import OptionParser parser = OptionParser() parser.add_option( \"--root\",", "configuration file found: %s\" % config_file) parser.print_help() return 1 config = load_config(config_file) var_dir", "DAMAGE. 
# import sys import os import json import xml.etree.ElementTree as ElementTree import", "parser.print_help() return 1 config = load_config(config_file) var_dir = os.sep.join([options.root, \"var\"]) if not os.path.exists(var_dir)", "# import sys import os import json import xml.etree.ElementTree as ElementTree import http_client", "MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT", "list of conditions and the following disclaimer in the # documentation and/or other", "following conditions are met: # * Redistributions of source code must retain the", "http_client.HttpClient(\"dynamicdns.park-your-domain.com\", secure=True) for host in config.hosts : if not force and host in", "f.write(json.dumps(updates)) def get_current_ip() : client = http_client.HttpClient(\"ipinfo.io\", secure=True) response = client.get(\"/json\") if response['status']", "% (ip, data)) config.logger.warn(\"Updated host %s in domain %s with ip %s\" %", "retain the above copyright # notice, this list of conditions and the following", "except Exception as e: import logging logging.error(e) import traceback traceback.print_exc() return -1 if", "updates, ip, force) : client = http_client.HttpClient(\"dynamicdns.park-your-domain.com\", secure=True) for host in config.hosts :", "def get_current_ip() : client = http_client.HttpClient(\"ipinfo.io\", secure=True) response = client.get(\"/json\") if response['status'] !=", "may be used to endorse or promote products # derived from this software", "# derived from this software without specific prior written permission. # # THIS", "OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF", "ip : config.logger.info(\"Host %s has ip %s. Skipping.\" % (host, ip)) continue params", "IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# import sys import os", "the name of dushin.net nor the # names of its contributors may be", "be used to endorse or promote products # derived from this software without", "to endorse or promote products # derived from this software without specific prior", "dushin.net nor the # names of its contributors may be used to endorse", "INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT", "reserved. # # Redistribution and use in source and binary forms, with or", "# Redistribution and use in source and binary forms, with or without #", "SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # import sys", "Exception(\"Error encountered updating ip %s: %s\" % (ip, data)) config.logger.warn(\"Updated host %s in", "updates[host] == ip : config.logger.info(\"Host %s has ip %s. Skipping.\" % (host, ip))", "if data['ErrCount'] != '0' or data['IP'] != ip or data['Done'] != 'true' :", "host %s in domain %s with ip %s\" % (host, config.domain, ip)) updates[host]", "in the # documentation and/or other materials provided with the distribution. # *", "ip %s. Skipping.\" % (host, ip)) continue params = { 'host': host, 'domain':", "DISCLAIMED. IN NO EVENT SHALL dushin.net BE LIABLE FOR ANY # DIRECT, INDIRECT,", "= {} for child in element : ret[child.tag] = child.text return ret def", "with ip %s\" % (host, config.domain, ip)) updates[host] = ip return updates def", "with the distribution. 
# * Neither the name of dushin.net nor the #", "200 : raise Exception(\"Did not receive 200 on update IP info\") data =", "NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS", "or promote products # derived from this software without specific prior written permission.", "def xml_tree_to_dict(element) : ret = {} for child in element : ret[child.tag] =", "USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH", "config_file = os.sep.join([options.root, \"etc\", \"config.py\"]) if not os.path.isfile(config_file) : print(\"No configuration file found:", "Exception as e: import logging logging.error(e) import traceback traceback.print_exc() return -1 if __name__", "updates) : with open(dat, 'w') as f : f.write(json.dumps(updates)) def get_current_ip() : client", "# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #", "TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE", "* Neither the name of dushin.net nor the # names of its contributors", "binary forms, with or without # modification, are permitted provided that the following", "of source code must retain the above copyright # notice, this list of", "specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY dushin.net ``AS", "parser.parse_args() try: if not options.root: parser.print_help() return 1 config_file = os.sep.join([options.root, \"etc\", \"config.py\"])", "for host in config.hosts : if not force and host in updates and", "ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN", "continue params = { 'host': host, 'domain': config.domain, 'password': <PASSWORD> } response =", "from this software without specific prior written permission. # # THIS SOFTWARE IS", "updates def main(argv) : parser = create_parser() (options, args) = parser.parse_args() try: if", "# notice, this list of conditions and the following disclaimer. 
# * Redistributions", "args) = parser.parse_args() try: if not options.root: parser.print_help() return 1 config_file = os.sep.join([options.root,", "FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL dushin.net", "load_config(_config_file) : import config return config def load_updates(dat) : if not os.path.exists(dat) :", "ElementTree import http_client def create_parser(): from optparse import OptionParser parser = OptionParser() parser.add_option(", "xml_tree_to_dict(element) : ret = {} for child in element : ret[child.tag] = child.text", "logging logging.error(e) import traceback traceback.print_exc() return -1 if __name__ == \"__main__\" : sys.exit(main(sys.argv))", "the above copyright # notice, this list of conditions and the following disclaimer.", "BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF", "= ip return updates def main(argv) : parser = create_parser() (options, args) =", "derived from this software without specific prior written permission. # # THIS SOFTWARE", "in element : ret[child.tag] = child.text return ret def xml_string_to_dict(text) : return xml_tree_to_dict(ElementTree.fromstring(text))", "without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY dushin.net", "LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT", "# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT", "% (host, config.domain, ip)) updates[host] = ip return updates def main(argv) : parser", "WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN", "dest=\"root\", help=\"Root directory\", type=\"string\", ) parser.add_option( \"--force\", dest=\"force\", action=\"store_true\", help=\"Force an update\", )", "# DISCLAIMED. 
IN NO EVENT SHALL dushin.net BE LIABLE FOR ANY # DIRECT,", "ip)) continue params = { 'host': host, 'domain': config.domain, 'password': <PASSWORD> } response", "\"--force\", dest=\"force\", action=\"store_true\", help=\"Force an update\", ) return parser def load_config(_config_file) : import", "\"update.dat\"]) updates = maybe_update_ip(config, load_updates(dat), get_current_ip(), options.force) write_updates(dat, updates) return 0 except Exception", "OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS", "permitted provided that the following conditions are met: # * Redistributions of source", "materials provided with the distribution. # * Neither the name of dushin.net nor", "A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL dushin.net BE LIABLE", "found: %s\" % config_file) parser.print_help() return 1 config = load_config(config_file) var_dir = os.sep.join([options.root,", "OF SUCH DAMAGE. # import sys import os import json import xml.etree.ElementTree as", ": print(\"No configuration file found: %s\" % config_file) parser.print_help() return 1 config =", "disclaimer. # * Redistributions in binary form must reproduce the above copyright #", "the # documentation and/or other materials provided with the distribution. # * Neither", "# THIS SOFTWARE IS PROVIDED BY dushin.net ``AS IS'' AND ANY # EXPRESS", "that the following conditions are met: # * Redistributions of source code must", "copyright # notice, this list of conditions and the following disclaimer in the", "updates) return 0 except Exception as e: import logging logging.error(e) import traceback traceback.print_exc()", "maybe_update_ip(config, load_updates(dat), get_current_ip(), options.force) write_updates(dat, updates) return 0 except Exception as e: import", "config.logger.warn(\"Updated host %s in domain %s with ip %s\" % (host, config.domain, ip))", "SUCH DAMAGE. 
# import sys import os import json import xml.etree.ElementTree as ElementTree", "return parser def load_config(_config_file) : import config return config def load_updates(dat) : if", "copyright # notice, this list of conditions and the following disclaimer. # *", "def create_parser(): from optparse import OptionParser parser = OptionParser() parser.add_option( \"--root\", dest=\"root\", help=\"Root" ]
[ "[False] # Prevents duplicate Read Aloud Buttons READ_ALOUD_STATUS = [False] def pdf_opened(): if", "from playsound import playsound from tkinter import * root = tk.Tk() root.title(\"Read Aloud:", "return file_split[-1] # Starts Text-to-Speech Process def generate_tts(): if PDF_STATUS[-1]: audio_reader = pyttsx3.init()", "[False] def pdf_opened(): if PDF_STATUS[-1]: return True return False # Opens PDF/EPUB file", "new file pdf_var.img_object_li.clear() set_pdf = pdf_var.pdf_view(frame, file_location = file, width = 120, height", "tkPDFViewer as pdf from gtts import gTTS, tts from playsound import playsound from", "from tkinter.font import BOLD from tkinter import Canvas, filedialog from tkPDFViewer import tkPDFViewer", "FILENAME.append(filename) open_pdf(filename) # Open File Button open_file = tk.Button(root, text=\"Open a File\", padx=30,", "# Generates popup window after creating MP3 file def popup_msg(msg): popup = tk.Tk()", "files\", \"*.pdf\"), (\"All Files\", \"*.*\"))) close_button() label = tk.Label(frame, text=filename_from_path(filename)) label.pack() FILENAME.append(filename) open_pdf(filename)", "my_text = \"\" for num in range(pages): page = my_pdf.getPage(num) my_text += page.extractText()", "root.iconbitmap(\"ReadAloud_icon.ico\") canvas = tk.Canvas(root, height=800, width=800, bg=\"#3F5A36\", highlightbackground=\"#3F5A36\", highlightthickness=2) canvas.create_text(400, 45, text=\"Turn any", "file for viewing and displays its name + extension def add_file(): if not", "filedialog from tkPDFViewer import tkPDFViewer as pdf from gtts import gTTS, tts from", "from tkinter import * root = tk.Tk() root.title(\"Read Aloud: Turn Your PDF Files", "range(pages): page = my_pdf.getPage(num) my_text += page.extractText() global audiobook_name audiobook_name = filename_from_path(FILENAME[-1]).split(\".\")[0].title() +", "BOLD), justify=\"center\", fill=\"white\") canvas.pack() frame = tk.Frame(root, bg=\"white\") frame.place(relwidth=0.8, relheight=0.8, 
relx=0.1, rely=0.1) #", "string of one PDF file path FILENAME = [] # PDF Status Indicators", "import * root = tk.Tk() root.title(\"Read Aloud: Turn Your PDF Files into Audiobooks", "file.split(\"/\") return file_split[-1] # Starts Text-to-Speech Process def generate_tts(): if PDF_STATUS[-1]: audio_reader =", "MP3 file \\\"{audiobook_name}\\\"\") # Generates popup window after creating MP3 file def popup_msg(msg):", "bg=\"#3F5A36\", highlightbackground=\"#3F5A36\", highlightthickness=2) canvas.create_text(400, 45, text=\"Turn any PDF into an Audiobook\", font=(\"Helvetica\", 21,", "= file.split(\"/\") return file_split[-1] # Starts Text-to-Speech Process def generate_tts(): if PDF_STATUS[-1]: audio_reader", "close_button(): close_file = tk.Button(frame, text=\"Close File\", padx=20, pady=7, fg=\"white\", bg=\"black\", command=close) close_file.pack() def", "PDF/EPUB file for viewing and displays its name + extension def add_file(): if", "set_pdf.pack() # Creates read aloud button if not READ_ALOUD_STATUS[-1]: read_aloud = tk.Button(root, text=\"Generate", "read_aloud.pack() READ_ALOUD_STATUS.append(True) PDF_STATUS.append(True) def close_button(): close_file = tk.Button(frame, text=\"Close File\", padx=20, pady=7, fg=\"white\",", "+ \" Audiobook.mp3\" audio_reader.save_to_file(my_text, audiobook_name) audio_reader.runAndWait() popup_msg(f\"Successfully generated MP3 file \\\"{audiobook_name}\\\"\") # Generates", "tkinter as tk import PyPDF3 from tkinter.font import BOLD from tkinter import Canvas,", "pady=5, fg=\"white\", bg=\"#5C1010\", justify=\"center\", command=add_file) open_file.pack() # Returns \"file name + .extension\" def", "Buttons READ_ALOUD_STATUS = [False] def pdf_opened(): if PDF_STATUS[-1]: return True return False #", "command=add_file) open_file.pack() # Returns \"file name + .extension\" def filename_from_path(file): file_split = file.split(\"/\")", "# Clears any previous PDF images before loading new file pdf_var.img_object_li.clear() set_pdf =", 
"False # Opens PDF/EPUB file for viewing and displays its name + extension", "relheight=0.8, relx=0.1, rely=0.1) # Holds string of one PDF file path FILENAME =", "canvas.create_text(400, 45, text=\"Turn any PDF into an Audiobook\", font=(\"Helvetica\", 21, BOLD), justify=\"center\", fill=\"white\")", "of Booleans PDF_STATUS = [False] # Prevents duplicate Read Aloud Buttons READ_ALOUD_STATUS =", "from tkinter import Canvas, filedialog from tkPDFViewer import tkPDFViewer as pdf from gtts", "\"rb\") as file: my_pdf = PyPDF3.PdfFileReader(file) pages = my_pdf.numPages my_text = \"\" for", "Starts Text-to-Speech Process def generate_tts(): if PDF_STATUS[-1]: audio_reader = pyttsx3.init() with open(FILENAME[-1], \"rb\")", "a List of Booleans PDF_STATUS = [False] # Prevents duplicate Read Aloud Buttons", "root = tk.Tk() root.title(\"Read Aloud: Turn Your PDF Files into Audiobooks [Stanford Code", "Code in Place 2021 Python Project by <NAME>]\") root.iconbitmap(\"ReadAloud_icon.ico\") canvas = tk.Canvas(root, height=800,", "page = my_pdf.getPage(num) my_text += page.extractText() global audiobook_name audiobook_name = filename_from_path(FILENAME[-1]).split(\".\")[0].title() + \"", "def generate_tts(): if PDF_STATUS[-1]: audio_reader = pyttsx3.init() with open(FILENAME[-1], \"rb\") as file: my_pdf", "pdf_opened(): for widget in frame.winfo_children(): widget.destroy() filename = filedialog.askopenfilename(initialdir=\"/clear\", title=\"Select a File\", filetypes=((\"PDF", "filedialog.askopenfilename(initialdir=\"/clear\", title=\"Select a File\", filetypes=((\"PDF files\", \"*.pdf\"), (\"All Files\", \"*.*\"))) close_button() label =", "PDF images before loading new file pdf_var.img_object_li.clear() set_pdf = pdf_var.pdf_view(frame, file_location = file,", "\"*.*\"))) close_button() label = tk.Label(frame, text=filename_from_path(filename)) label.pack() FILENAME.append(filename) open_pdf(filename) # Open File Button", "frame def open_pdf(file): pdf_var = pdf.ShowPdf() # 
Clears any previous PDF images before", "Canvas, filedialog from tkPDFViewer import tkPDFViewer as pdf from gtts import gTTS, tts", "Audiobook\", font=(\"Helvetica\", 21, BOLD), justify=\"center\", fill=\"white\") canvas.pack() frame = tk.Frame(root, bg=\"white\") frame.place(relwidth=0.8, relheight=0.8,", "extension def add_file(): if not pdf_opened(): for widget in frame.winfo_children(): widget.destroy() filename =", "tk.Label(frame, text=filename_from_path(filename)) label.pack() FILENAME.append(filename) open_pdf(filename) # Open File Button open_file = tk.Button(root, text=\"Open", "Read Aloud Buttons READ_ALOUD_STATUS = [False] def pdf_opened(): if PDF_STATUS[-1]: return True return", "a File\", filetypes=((\"PDF files\", \"*.pdf\"), (\"All Files\", \"*.*\"))) close_button() label = tk.Label(frame, text=filename_from_path(filename))", "popup.geometry(\"1000x100\") popup.config(bg=\"darkgreen\") label = tk.Label(popup, text=msg, font=\"Helvetica 18\") label.pack(side=\"top\", fill=\"x\", pady=10) B1 =", "aloud button if not READ_ALOUD_STATUS[-1]: read_aloud = tk.Button(root, text=\"Generate Audiobook\", padx=5, pady=12, fg=\"white\",", "images before loading new file pdf_var.img_object_li.clear() set_pdf = pdf_var.pdf_view(frame, file_location = file, width", "= 120, height = 120) set_pdf.pack() # Creates read aloud button if not", "generated MP3 file \\\"{audiobook_name}\\\"\") # Generates popup window after creating MP3 file def", "open_pdf(filename) # Open File Button open_file = tk.Button(root, text=\"Open a File\", padx=30, pady=5,", "text=\"Close File\", padx=20, pady=7, fg=\"white\", bg=\"black\", command=close) close_file.pack() def close(): for widget in", "for num in range(pages): page = my_pdf.getPage(num) my_text += page.extractText() global audiobook_name audiobook_name", "45, text=\"Turn any PDF into an Audiobook\", font=(\"Helvetica\", 21, BOLD), justify=\"center\", fill=\"white\") canvas.pack()", "into an Audiobook\", font=(\"Helvetica\", 21, 
BOLD), justify=\"center\", fill=\"white\") canvas.pack() frame = tk.Frame(root, bg=\"white\")", "def open_pdf(file): pdf_var = pdf.ShowPdf() # Clears any previous PDF images before loading", "Opens PDF in frame def open_pdf(file): pdf_var = pdf.ShowPdf() # Clears any previous", "def filename_from_path(file): file_split = file.split(\"/\") return file_split[-1] # Starts Text-to-Speech Process def generate_tts():", "# Starts Text-to-Speech Process def generate_tts(): if PDF_STATUS[-1]: audio_reader = pyttsx3.init() with open(FILENAME[-1],", "Audiobook.mp3\" audio_reader.save_to_file(my_text, audiobook_name) audio_reader.runAndWait() popup_msg(f\"Successfully generated MP3 file \\\"{audiobook_name}\\\"\") # Generates popup window", "File\", padx=30, pady=5, fg=\"white\", bg=\"#5C1010\", justify=\"center\", command=add_file) open_file.pack() # Returns \"file name +", "pdf.ShowPdf() # Clears any previous PDF images before loading new file pdf_var.img_object_li.clear() set_pdf", "title=\"Select a File\", filetypes=((\"PDF files\", \"*.pdf\"), (\"All Files\", \"*.*\"))) close_button() label = tk.Label(frame,", "# Opens PDF/EPUB file for viewing and displays its name + extension def", "import BOLD from tkinter import Canvas, filedialog from tkPDFViewer import tkPDFViewer as pdf", "FILENAME = [] # PDF Status Indicators in a List of Booleans PDF_STATUS", "font=\"Helvetica 18\") label.pack(side=\"top\", fill=\"x\", pady=10) B1 = tk.Button(popup, text=\"OK\", command=popup.destroy) B1.pack(pady=5, padx=5) popup.mainloop()", "not READ_ALOUD_STATUS[-1]: read_aloud = tk.Button(root, text=\"Generate Audiobook\", padx=5, pady=12, fg=\"white\", bg=\"#4B1B5B\", justify=\"center\", command=generate_tts)", "= my_pdf.getPage(num) my_text += page.extractText() global audiobook_name audiobook_name = filename_from_path(FILENAME[-1]).split(\".\")[0].title() + \" Audiobook.mp3\"", "import Canvas, filedialog from tkPDFViewer import tkPDFViewer as pdf from gtts import gTTS,", "PDF into an 
Audiobook\", font=(\"Helvetica\", 21, BOLD), justify=\"center\", fill=\"white\") canvas.pack() frame = tk.Frame(root,", "padx=5, pady=12, fg=\"white\", bg=\"#4B1B5B\", justify=\"center\", command=generate_tts) read_aloud.pack() READ_ALOUD_STATUS.append(True) PDF_STATUS.append(True) def close_button(): close_file =", "for viewing and displays its name + extension def add_file(): if not pdf_opened():", "highlightbackground=\"#3F5A36\", highlightthickness=2) canvas.create_text(400, 45, text=\"Turn any PDF into an Audiobook\", font=(\"Helvetica\", 21, BOLD),", "text=filename_from_path(filename)) label.pack() FILENAME.append(filename) open_pdf(filename) # Open File Button open_file = tk.Button(root, text=\"Open a", "filename_from_path(file): file_split = file.split(\"/\") return file_split[-1] # Starts Text-to-Speech Process def generate_tts(): if", "justify=\"center\", fill=\"white\") canvas.pack() frame = tk.Frame(root, bg=\"white\") frame.place(relwidth=0.8, relheight=0.8, relx=0.1, rely=0.1) # Holds", "read aloud button if not READ_ALOUD_STATUS[-1]: read_aloud = tk.Button(root, text=\"Generate Audiobook\", padx=5, pady=12,", "tk import PyPDF3 from tkinter.font import BOLD from tkinter import Canvas, filedialog from", "close_button() label = tk.Label(frame, text=filename_from_path(filename)) label.pack() FILENAME.append(filename) open_pdf(filename) # Open File Button open_file", "playsound from tkinter import * root = tk.Tk() root.title(\"Read Aloud: Turn Your PDF", "in Place 2021 Python Project by <NAME>]\") root.iconbitmap(\"ReadAloud_icon.ico\") canvas = tk.Canvas(root, height=800, width=800,", "= [False] def pdf_opened(): if PDF_STATUS[-1]: return True return False # Opens PDF/EPUB", "PDF Files into Audiobooks [Stanford Code in Place 2021 Python Project by <NAME>]\")", "# Prevents duplicate Read Aloud Buttons READ_ALOUD_STATUS = [False] def pdf_opened(): if PDF_STATUS[-1]:", "= pdf.ShowPdf() # Clears any previous PDF images before loading new file 
pdf_var.img_object_li.clear()", "File Button open_file = tk.Button(root, text=\"Open a File\", padx=30, pady=5, fg=\"white\", bg=\"#5C1010\", justify=\"center\",", "READ_ALOUD_STATUS = [False] def pdf_opened(): if PDF_STATUS[-1]: return True return False # Opens", "= PyPDF3.PdfFileReader(file) pages = my_pdf.numPages my_text = \"\" for num in range(pages): page", "playsound import playsound from tkinter import * root = tk.Tk() root.title(\"Read Aloud: Turn", "any PDF into an Audiobook\", font=(\"Helvetica\", 21, BOLD), justify=\"center\", fill=\"white\") canvas.pack() frame =", "PyPDF3.PdfFileReader(file) pages = my_pdf.numPages my_text = \"\" for num in range(pages): page =", "pdf_var.pdf_view(frame, file_location = file, width = 120, height = 120) set_pdf.pack() # Creates", "Status Indicators in a List of Booleans PDF_STATUS = [False] # Prevents duplicate", "21, BOLD), justify=\"center\", fill=\"white\") canvas.pack() frame = tk.Frame(root, bg=\"white\") frame.place(relwidth=0.8, relheight=0.8, relx=0.1, rely=0.1)", "relx=0.1, rely=0.1) # Holds string of one PDF file path FILENAME = []", "Open File Button open_file = tk.Button(root, text=\"Open a File\", padx=30, pady=5, fg=\"white\", bg=\"#5C1010\",", "height = 120) set_pdf.pack() # Creates read aloud button if not READ_ALOUD_STATUS[-1]: read_aloud", "label = tk.Label(popup, text=msg, font=\"Helvetica 18\") label.pack(side=\"top\", fill=\"x\", pady=10) B1 = tk.Button(popup, text=\"OK\",", "120, height = 120) set_pdf.pack() # Creates read aloud button if not READ_ALOUD_STATUS[-1]:", "from gtts import gTTS, tts from playsound import playsound from tkinter import *", "open_file.pack() # Returns \"file name + .extension\" def filename_from_path(file): file_split = file.split(\"/\") return", "tk.Label(popup, text=msg, font=\"Helvetica 18\") label.pack(side=\"top\", fill=\"x\", pady=10) B1 = tk.Button(popup, text=\"OK\", command=popup.destroy) B1.pack(pady=5,", "import gTTS, tts from playsound import playsound from 
tkinter import * root =", "= filename_from_path(FILENAME[-1]).split(\".\")[0].title() + \" Audiobook.mp3\" audio_reader.save_to_file(my_text, audiobook_name) audio_reader.runAndWait() popup_msg(f\"Successfully generated MP3 file \\\"{audiobook_name}\\\"\")", "popup.config(bg=\"darkgreen\") label = tk.Label(popup, text=msg, font=\"Helvetica 18\") label.pack(side=\"top\", fill=\"x\", pady=10) B1 = tk.Button(popup,", "viewing and displays its name + extension def add_file(): if not pdf_opened(): for", "return False # Opens PDF/EPUB file for viewing and displays its name +", "tkinter import Canvas, filedialog from tkPDFViewer import tkPDFViewer as pdf from gtts import", "Button open_file = tk.Button(root, text=\"Open a File\", padx=30, pady=5, fg=\"white\", bg=\"#5C1010\", justify=\"center\", command=add_file)", "fg=\"white\", bg=\"#5C1010\", justify=\"center\", command=add_file) open_file.pack() # Returns \"file name + .extension\" def filename_from_path(file):", "= tk.Button(popup, text=\"OK\", command=popup.destroy) B1.pack(pady=5, padx=5) popup.mainloop() # Opens PDF in frame def", "pdf_var.img_object_li.clear() set_pdf = pdf_var.pdf_view(frame, file_location = file, width = 120, height = 120)", "text=\"Open a File\", padx=30, pady=5, fg=\"white\", bg=\"#5C1010\", justify=\"center\", command=add_file) open_file.pack() # Returns \"file", "bg=\"#4B1B5B\", justify=\"center\", command=generate_tts) read_aloud.pack() READ_ALOUD_STATUS.append(True) PDF_STATUS.append(True) def close_button(): close_file = tk.Button(frame, text=\"Close File\",", "Holds string of one PDF file path FILENAME = [] # PDF Status", "= tk.Canvas(root, height=800, width=800, bg=\"#3F5A36\", highlightbackground=\"#3F5A36\", highlightthickness=2) canvas.create_text(400, 45, text=\"Turn any PDF into", "with open(FILENAME[-1], \"rb\") as file: my_pdf = PyPDF3.PdfFileReader(file) pages = my_pdf.numPages my_text =", "B1 = tk.Button(popup, text=\"OK\", command=popup.destroy) B1.pack(pady=5, padx=5) 
popup.mainloop() # Opens PDF in frame", "READ_ALOUD_STATUS[-1]: read_aloud = tk.Button(root, text=\"Generate Audiobook\", padx=5, pady=12, fg=\"white\", bg=\"#4B1B5B\", justify=\"center\", command=generate_tts) read_aloud.pack()", "# Returns \"file name + .extension\" def filename_from_path(file): file_split = file.split(\"/\") return file_split[-1]", "filename_from_path(FILENAME[-1]).split(\".\")[0].title() + \" Audiobook.mp3\" audio_reader.save_to_file(my_text, audiobook_name) audio_reader.runAndWait() popup_msg(f\"Successfully generated MP3 file \\\"{audiobook_name}\\\"\") #", "+= page.extractText() global audiobook_name audiobook_name = filename_from_path(FILENAME[-1]).split(\".\")[0].title() + \" Audiobook.mp3\" audio_reader.save_to_file(my_text, audiobook_name) audio_reader.runAndWait()", "my_pdf.numPages my_text = \"\" for num in range(pages): page = my_pdf.getPage(num) my_text +=", "B1.pack(pady=5, padx=5) popup.mainloop() # Opens PDF in frame def open_pdf(file): pdf_var = pdf.ShowPdf()", "padx=20, pady=7, fg=\"white\", bg=\"black\", command=close) close_file.pack() def close(): for widget in frame.winfo_children(): widget.destroy()", "my_text += page.extractText() global audiobook_name audiobook_name = filename_from_path(FILENAME[-1]).split(\".\")[0].title() + \" Audiobook.mp3\" audio_reader.save_to_file(my_text, audiobook_name)", "Creates read aloud button if not READ_ALOUD_STATUS[-1]: read_aloud = tk.Button(root, text=\"Generate Audiobook\", padx=5,", "window after creating MP3 file def popup_msg(msg): popup = tk.Tk() popup.title(\"Read Aloud\") popup.iconbitmap(\"ReadAloud_icon.ico\")", "Audiobooks [Stanford Code in Place 2021 Python Project by <NAME>]\") root.iconbitmap(\"ReadAloud_icon.ico\") canvas =", "List of Booleans PDF_STATUS = [False] # Prevents duplicate Read Aloud Buttons READ_ALOUD_STATUS", "global audiobook_name audiobook_name = filename_from_path(FILENAME[-1]).split(\".\")[0].title() + \" Audiobook.mp3\" audio_reader.save_to_file(my_text, 
audiobook_name) audio_reader.runAndWait() popup_msg(f\"Successfully generated", "fill=\"white\") canvas.pack() frame = tk.Frame(root, bg=\"white\") frame.place(relwidth=0.8, relheight=0.8, relx=0.1, rely=0.1) # Holds string", "one PDF file path FILENAME = [] # PDF Status Indicators in a", "pdf_opened(): if PDF_STATUS[-1]: return True return False # Opens PDF/EPUB file for viewing", "<gh_stars>0 import os, pyttsx3, tkinter as tk import PyPDF3 from tkinter.font import BOLD", "audio_reader.runAndWait() popup_msg(f\"Successfully generated MP3 file \\\"{audiobook_name}\\\"\") # Generates popup window after creating MP3", "pyttsx3.init() with open(FILENAME[-1], \"rb\") as file: my_pdf = PyPDF3.PdfFileReader(file) pages = my_pdf.numPages my_text", "my_pdf.getPage(num) my_text += page.extractText() global audiobook_name audiobook_name = filename_from_path(FILENAME[-1]).split(\".\")[0].title() + \" Audiobook.mp3\" audio_reader.save_to_file(my_text,", "return True return False # Opens PDF/EPUB file for viewing and displays its", "command=popup.destroy) B1.pack(pady=5, padx=5) popup.mainloop() # Opens PDF in frame def open_pdf(file): pdf_var =", "close_file = tk.Button(frame, text=\"Close File\", padx=20, pady=7, fg=\"white\", bg=\"black\", command=close) close_file.pack() def close():", "a File\", padx=30, pady=5, fg=\"white\", bg=\"#5C1010\", justify=\"center\", command=add_file) open_file.pack() # Returns \"file name", "= my_pdf.numPages my_text = \"\" for num in range(pages): page = my_pdf.getPage(num) my_text", "for widget in frame.winfo_children(): widget.destroy() filename = filedialog.askopenfilename(initialdir=\"/clear\", title=\"Select a File\", filetypes=((\"PDF files\",", "generate_tts(): if PDF_STATUS[-1]: audio_reader = pyttsx3.init() with open(FILENAME[-1], \"rb\") as file: my_pdf =", "audiobook_name audiobook_name = filename_from_path(FILENAME[-1]).split(\".\")[0].title() + \" Audiobook.mp3\" audio_reader.save_to_file(my_text, audiobook_name) 
audio_reader.runAndWait() popup_msg(f\"Successfully generated MP3", "bg=\"#5C1010\", justify=\"center\", command=add_file) open_file.pack() # Returns \"file name + .extension\" def filename_from_path(file): file_split", "file, width = 120, height = 120) set_pdf.pack() # Creates read aloud button", "tk.Button(root, text=\"Open a File\", padx=30, pady=5, fg=\"white\", bg=\"#5C1010\", justify=\"center\", command=add_file) open_file.pack() # Returns", "text=\"Generate Audiobook\", padx=5, pady=12, fg=\"white\", bg=\"#4B1B5B\", justify=\"center\", command=generate_tts) read_aloud.pack() READ_ALOUD_STATUS.append(True) PDF_STATUS.append(True) def close_button():", "= pdf_var.pdf_view(frame, file_location = file, width = 120, height = 120) set_pdf.pack() #", "PDF in frame def open_pdf(file): pdf_var = pdf.ShowPdf() # Clears any previous PDF", "name + extension def add_file(): if not pdf_opened(): for widget in frame.winfo_children(): widget.destroy()", "filetypes=((\"PDF files\", \"*.pdf\"), (\"All Files\", \"*.*\"))) close_button() label = tk.Label(frame, text=filename_from_path(filename)) label.pack() FILENAME.append(filename)", "label = tk.Label(frame, text=filename_from_path(filename)) label.pack() FILENAME.append(filename) open_pdf(filename) # Open File Button open_file =", "popup_msg(f\"Successfully generated MP3 file \\\"{audiobook_name}\\\"\") # Generates popup window after creating MP3 file", "as pdf from gtts import gTTS, tts from playsound import playsound from tkinter", "\"\" for num in range(pages): page = my_pdf.getPage(num) my_text += page.extractText() global audiobook_name", "+ .extension\" def filename_from_path(file): file_split = file.split(\"/\") return file_split[-1] # Starts Text-to-Speech Process", "Returns \"file name + .extension\" def filename_from_path(file): file_split = file.split(\"/\") return file_split[-1] #", "+ extension def add_file(): if not pdf_opened(): for widget in frame.winfo_children(): widget.destroy() filename", "filename = 
filedialog.askopenfilename(initialdir=\"/clear\", title=\"Select a File\", filetypes=((\"PDF files\", \"*.pdf\"), (\"All Files\", \"*.*\"))) close_button()", "file def popup_msg(msg): popup = tk.Tk() popup.title(\"Read Aloud\") popup.iconbitmap(\"ReadAloud_icon.ico\") popup.geometry(\"1000x100\") popup.config(bg=\"darkgreen\") label =", "PDF_STATUS[-1]: audio_reader = pyttsx3.init() with open(FILENAME[-1], \"rb\") as file: my_pdf = PyPDF3.PdfFileReader(file) pages", "pady=7, fg=\"white\", bg=\"black\", command=close) close_file.pack() def close(): for widget in frame.winfo_children(): widget.destroy() FILENAME.clear()", "import playsound from tkinter import * root = tk.Tk() root.title(\"Read Aloud: Turn Your", "def popup_msg(msg): popup = tk.Tk() popup.title(\"Read Aloud\") popup.iconbitmap(\"ReadAloud_icon.ico\") popup.geometry(\"1000x100\") popup.config(bg=\"darkgreen\") label = tk.Label(popup,", ".extension\" def filename_from_path(file): file_split = file.split(\"/\") return file_split[-1] # Starts Text-to-Speech Process def", "popup_msg(msg): popup = tk.Tk() popup.title(\"Read Aloud\") popup.iconbitmap(\"ReadAloud_icon.ico\") popup.geometry(\"1000x100\") popup.config(bg=\"darkgreen\") label = tk.Label(popup, text=msg,", "rely=0.1) # Holds string of one PDF file path FILENAME = [] #", "import tkPDFViewer as pdf from gtts import gTTS, tts from playsound import playsound", "popup.iconbitmap(\"ReadAloud_icon.ico\") popup.geometry(\"1000x100\") popup.config(bg=\"darkgreen\") label = tk.Label(popup, text=msg, font=\"Helvetica 18\") label.pack(side=\"top\", fill=\"x\", pady=10) B1", "pages = my_pdf.numPages my_text = \"\" for num in range(pages): page = my_pdf.getPage(num)", "Aloud: Turn Your PDF Files into Audiobooks [Stanford Code in Place 2021 Python", "tk.Canvas(root, height=800, width=800, bg=\"#3F5A36\", highlightbackground=\"#3F5A36\", highlightthickness=2) canvas.create_text(400, 45, text=\"Turn any PDF into an", "# Open File Button open_file = 
tk.Button(root, text=\"Open a File\", padx=30, pady=5, fg=\"white\",", "before loading new file pdf_var.img_object_li.clear() set_pdf = pdf_var.pdf_view(frame, file_location = file, width =", "File\", padx=20, pady=7, fg=\"white\", bg=\"black\", command=close) close_file.pack() def close(): for widget in frame.winfo_children():", "= tk.Label(popup, text=msg, font=\"Helvetica 18\") label.pack(side=\"top\", fill=\"x\", pady=10) B1 = tk.Button(popup, text=\"OK\", command=popup.destroy)", "\\\"{audiobook_name}\\\"\") # Generates popup window after creating MP3 file def popup_msg(msg): popup =", "[] # PDF Status Indicators in a List of Booleans PDF_STATUS = [False]", "file_location = file, width = 120, height = 120) set_pdf.pack() # Creates read", "add_file(): if not pdf_opened(): for widget in frame.winfo_children(): widget.destroy() filename = filedialog.askopenfilename(initialdir=\"/clear\", title=\"Select", "widget in frame.winfo_children(): widget.destroy() filename = filedialog.askopenfilename(initialdir=\"/clear\", title=\"Select a File\", filetypes=((\"PDF files\", \"*.pdf\"),", "= \"\" for num in range(pages): page = my_pdf.getPage(num) my_text += page.extractText() global", "\" Audiobook.mp3\" audio_reader.save_to_file(my_text, audiobook_name) audio_reader.runAndWait() popup_msg(f\"Successfully generated MP3 file \\\"{audiobook_name}\\\"\") # Generates popup", "= [False] # Prevents duplicate Read Aloud Buttons READ_ALOUD_STATUS = [False] def pdf_opened():", "file path FILENAME = [] # PDF Status Indicators in a List of", "an Audiobook\", font=(\"Helvetica\", 21, BOLD), justify=\"center\", fill=\"white\") canvas.pack() frame = tk.Frame(root, bg=\"white\") frame.place(relwidth=0.8,", "any previous PDF images before loading new file pdf_var.img_object_li.clear() set_pdf = pdf_var.pdf_view(frame, file_location", "command=generate_tts) read_aloud.pack() READ_ALOUD_STATUS.append(True) PDF_STATUS.append(True) def close_button(): close_file = tk.Button(frame, 
text=\"Close File\", padx=20, pady=7,", "(\"All Files\", \"*.*\"))) close_button() label = tk.Label(frame, text=filename_from_path(filename)) label.pack() FILENAME.append(filename) open_pdf(filename) # Open", "path FILENAME = [] # PDF Status Indicators in a List of Booleans", "Aloud Buttons READ_ALOUD_STATUS = [False] def pdf_opened(): if PDF_STATUS[-1]: return True return False", "pdf_var = pdf.ShowPdf() # Clears any previous PDF images before loading new file", "18\") label.pack(side=\"top\", fill=\"x\", pady=10) B1 = tk.Button(popup, text=\"OK\", command=popup.destroy) B1.pack(pady=5, padx=5) popup.mainloop() #", "justify=\"center\", command=add_file) open_file.pack() # Returns \"file name + .extension\" def filename_from_path(file): file_split =", "displays its name + extension def add_file(): if not pdf_opened(): for widget in", "button if not READ_ALOUD_STATUS[-1]: read_aloud = tk.Button(root, text=\"Generate Audiobook\", padx=5, pady=12, fg=\"white\", bg=\"#4B1B5B\",", "in frame def open_pdf(file): pdf_var = pdf.ShowPdf() # Clears any previous PDF images", "# Holds string of one PDF file path FILENAME = [] # PDF", "Files into Audiobooks [Stanford Code in Place 2021 Python Project by <NAME>]\") root.iconbitmap(\"ReadAloud_icon.ico\")", "True return False # Opens PDF/EPUB file for viewing and displays its name", "read_aloud = tk.Button(root, text=\"Generate Audiobook\", padx=5, pady=12, fg=\"white\", bg=\"#4B1B5B\", justify=\"center\", command=generate_tts) read_aloud.pack() READ_ALOUD_STATUS.append(True)", "<NAME>]\") root.iconbitmap(\"ReadAloud_icon.ico\") canvas = tk.Canvas(root, height=800, width=800, bg=\"#3F5A36\", highlightbackground=\"#3F5A36\", highlightthickness=2) canvas.create_text(400, 45, text=\"Turn", "= 120) set_pdf.pack() # Creates read aloud button if not READ_ALOUD_STATUS[-1]: read_aloud =", "as tk import PyPDF3 from tkinter.font import BOLD from tkinter import Canvas, filedialog", "pyttsx3, tkinter as tk import PyPDF3 from tkinter.font 
import BOLD from tkinter import", "# Creates read aloud button if not READ_ALOUD_STATUS[-1]: read_aloud = tk.Button(root, text=\"Generate Audiobook\",", "= tk.Label(frame, text=filename_from_path(filename)) label.pack() FILENAME.append(filename) open_pdf(filename) # Open File Button open_file = tk.Button(root,", "Indicators in a List of Booleans PDF_STATUS = [False] # Prevents duplicate Read", "file_split[-1] # Starts Text-to-Speech Process def generate_tts(): if PDF_STATUS[-1]: audio_reader = pyttsx3.init() with", "pady=12, fg=\"white\", bg=\"#4B1B5B\", justify=\"center\", command=generate_tts) read_aloud.pack() READ_ALOUD_STATUS.append(True) PDF_STATUS.append(True) def close_button(): close_file = tk.Button(frame,", "previous PDF images before loading new file pdf_var.img_object_li.clear() set_pdf = pdf_var.pdf_view(frame, file_location =", "Your PDF Files into Audiobooks [Stanford Code in Place 2021 Python Project by", "# Opens PDF in frame def open_pdf(file): pdf_var = pdf.ShowPdf() # Clears any", "gTTS, tts from playsound import playsound from tkinter import * root = tk.Tk()", "my_pdf = PyPDF3.PdfFileReader(file) pages = my_pdf.numPages my_text = \"\" for num in range(pages):", "def pdf_opened(): if PDF_STATUS[-1]: return True return False # Opens PDF/EPUB file for", "justify=\"center\", command=generate_tts) read_aloud.pack() READ_ALOUD_STATUS.append(True) PDF_STATUS.append(True) def close_button(): close_file = tk.Button(frame, text=\"Close File\", padx=20,", "root.title(\"Read Aloud: Turn Your PDF Files into Audiobooks [Stanford Code in Place 2021", "Files\", \"*.*\"))) close_button() label = tk.Label(frame, text=filename_from_path(filename)) label.pack() FILENAME.append(filename) open_pdf(filename) # Open File", "Python Project by <NAME>]\") root.iconbitmap(\"ReadAloud_icon.ico\") canvas = tk.Canvas(root, height=800, width=800, bg=\"#3F5A36\", highlightbackground=\"#3F5A36\", highlightthickness=2)", "num in range(pages): page = my_pdf.getPage(num) my_text 
+= page.extractText() global audiobook_name audiobook_name =", "label.pack() FILENAME.append(filename) open_pdf(filename) # Open File Button open_file = tk.Button(root, text=\"Open a File\",", "= file, width = 120, height = 120) set_pdf.pack() # Creates read aloud", "* root = tk.Tk() root.title(\"Read Aloud: Turn Your PDF Files into Audiobooks [Stanford", "popup.title(\"Read Aloud\") popup.iconbitmap(\"ReadAloud_icon.ico\") popup.geometry(\"1000x100\") popup.config(bg=\"darkgreen\") label = tk.Label(popup, text=msg, font=\"Helvetica 18\") label.pack(side=\"top\", fill=\"x\",", "into Audiobooks [Stanford Code in Place 2021 Python Project by <NAME>]\") root.iconbitmap(\"ReadAloud_icon.ico\") canvas", "Aloud\") popup.iconbitmap(\"ReadAloud_icon.ico\") popup.geometry(\"1000x100\") popup.config(bg=\"darkgreen\") label = tk.Label(popup, text=msg, font=\"Helvetica 18\") label.pack(side=\"top\", fill=\"x\", pady=10)", "= pyttsx3.init() with open(FILENAME[-1], \"rb\") as file: my_pdf = PyPDF3.PdfFileReader(file) pages = my_pdf.numPages", "frame = tk.Frame(root, bg=\"white\") frame.place(relwidth=0.8, relheight=0.8, relx=0.1, rely=0.1) # Holds string of one", "after creating MP3 file def popup_msg(msg): popup = tk.Tk() popup.title(\"Read Aloud\") popup.iconbitmap(\"ReadAloud_icon.ico\") popup.geometry(\"1000x100\")", "bg=\"white\") frame.place(relwidth=0.8, relheight=0.8, relx=0.1, rely=0.1) # Holds string of one PDF file path", "tk.Tk() root.title(\"Read Aloud: Turn Your PDF Files into Audiobooks [Stanford Code in Place", "file pdf_var.img_object_li.clear() set_pdf = pdf_var.pdf_view(frame, file_location = file, width = 120, height =", "width=800, bg=\"#3F5A36\", highlightbackground=\"#3F5A36\", highlightthickness=2) canvas.create_text(400, 45, text=\"Turn any PDF into an Audiobook\", font=(\"Helvetica\",", "MP3 file def popup_msg(msg): popup = tk.Tk() popup.title(\"Read Aloud\") popup.iconbitmap(\"ReadAloud_icon.ico\") popup.geometry(\"1000x100\") 
popup.config(bg=\"darkgreen\") label", "if not pdf_opened(): for widget in frame.winfo_children(): widget.destroy() filename = filedialog.askopenfilename(initialdir=\"/clear\", title=\"Select a", "= [] # PDF Status Indicators in a List of Booleans PDF_STATUS =", "creating MP3 file def popup_msg(msg): popup = tk.Tk() popup.title(\"Read Aloud\") popup.iconbitmap(\"ReadAloud_icon.ico\") popup.geometry(\"1000x100\") popup.config(bg=\"darkgreen\")", "if not READ_ALOUD_STATUS[-1]: read_aloud = tk.Button(root, text=\"Generate Audiobook\", padx=5, pady=12, fg=\"white\", bg=\"#4B1B5B\", justify=\"center\",", "canvas = tk.Canvas(root, height=800, width=800, bg=\"#3F5A36\", highlightbackground=\"#3F5A36\", highlightthickness=2) canvas.create_text(400, 45, text=\"Turn any PDF", "and displays its name + extension def add_file(): if not pdf_opened(): for widget", "text=\"OK\", command=popup.destroy) B1.pack(pady=5, padx=5) popup.mainloop() # Opens PDF in frame def open_pdf(file): pdf_var", "[Stanford Code in Place 2021 Python Project by <NAME>]\") root.iconbitmap(\"ReadAloud_icon.ico\") canvas = tk.Canvas(root,", "its name + extension def add_file(): if not pdf_opened(): for widget in frame.winfo_children():", "pady=10) B1 = tk.Button(popup, text=\"OK\", command=popup.destroy) B1.pack(pady=5, padx=5) popup.mainloop() # Opens PDF in", "pdf from gtts import gTTS, tts from playsound import playsound from tkinter import", "text=msg, font=\"Helvetica 18\") label.pack(side=\"top\", fill=\"x\", pady=10) B1 = tk.Button(popup, text=\"OK\", command=popup.destroy) B1.pack(pady=5, padx=5)", "PDF_STATUS.append(True) def close_button(): close_file = tk.Button(frame, text=\"Close File\", padx=20, pady=7, fg=\"white\", bg=\"black\", command=close)", "in frame.winfo_children(): widget.destroy() filename = filedialog.askopenfilename(initialdir=\"/clear\", title=\"Select a File\", filetypes=((\"PDF files\", \"*.pdf\"), (\"All", "def close_button(): close_file = tk.Button(frame, text=\"Close 
File\", padx=20, pady=7, fg=\"white\", bg=\"black\", command=close) close_file.pack()", "padx=5) popup.mainloop() # Opens PDF in frame def open_pdf(file): pdf_var = pdf.ShowPdf() #", "width = 120, height = 120) set_pdf.pack() # Creates read aloud button if", "BOLD from tkinter import Canvas, filedialog from tkPDFViewer import tkPDFViewer as pdf from", "file \\\"{audiobook_name}\\\"\") # Generates popup window after creating MP3 file def popup_msg(msg): popup", "\"*.pdf\"), (\"All Files\", \"*.*\"))) close_button() label = tk.Label(frame, text=filename_from_path(filename)) label.pack() FILENAME.append(filename) open_pdf(filename) #", "widget.destroy() filename = filedialog.askopenfilename(initialdir=\"/clear\", title=\"Select a File\", filetypes=((\"PDF files\", \"*.pdf\"), (\"All Files\", \"*.*\")))", "popup window after creating MP3 file def popup_msg(msg): popup = tk.Tk() popup.title(\"Read Aloud\")", "tk.Button(root, text=\"Generate Audiobook\", padx=5, pady=12, fg=\"white\", bg=\"#4B1B5B\", justify=\"center\", command=generate_tts) read_aloud.pack() READ_ALOUD_STATUS.append(True) PDF_STATUS.append(True) def", "READ_ALOUD_STATUS.append(True) PDF_STATUS.append(True) def close_button(): close_file = tk.Button(frame, text=\"Close File\", padx=20, pady=7, fg=\"white\", bg=\"black\",", "canvas.pack() frame = tk.Frame(root, bg=\"white\") frame.place(relwidth=0.8, relheight=0.8, relx=0.1, rely=0.1) # Holds string of", "= filedialog.askopenfilename(initialdir=\"/clear\", title=\"Select a File\", filetypes=((\"PDF files\", \"*.pdf\"), (\"All Files\", \"*.*\"))) close_button() label", "as file: my_pdf = PyPDF3.PdfFileReader(file) pages = my_pdf.numPages my_text = \"\" for num", "duplicate Read Aloud Buttons READ_ALOUD_STATUS = [False] def pdf_opened(): if PDF_STATUS[-1]: return True", "not pdf_opened(): for widget in frame.winfo_children(): widget.destroy() filename = filedialog.askopenfilename(initialdir=\"/clear\", title=\"Select a File\",", "by <NAME>]\") 
root.iconbitmap(\"ReadAloud_icon.ico\") canvas = tk.Canvas(root, height=800, width=800, bg=\"#3F5A36\", highlightbackground=\"#3F5A36\", highlightthickness=2) canvas.create_text(400, 45,", "tk.Tk() popup.title(\"Read Aloud\") popup.iconbitmap(\"ReadAloud_icon.ico\") popup.geometry(\"1000x100\") popup.config(bg=\"darkgreen\") label = tk.Label(popup, text=msg, font=\"Helvetica 18\") label.pack(side=\"top\",", "fg=\"white\", bg=\"black\", command=close) close_file.pack() def close(): for widget in frame.winfo_children(): widget.destroy() FILENAME.clear() PDF_STATUS.append(False)", "of one PDF file path FILENAME = [] # PDF Status Indicators in", "tk.Frame(root, bg=\"white\") frame.place(relwidth=0.8, relheight=0.8, relx=0.1, rely=0.1) # Holds string of one PDF file", "= tk.Frame(root, bg=\"white\") frame.place(relwidth=0.8, relheight=0.8, relx=0.1, rely=0.1) # Holds string of one PDF", "2021 Python Project by <NAME>]\") root.iconbitmap(\"ReadAloud_icon.ico\") canvas = tk.Canvas(root, height=800, width=800, bg=\"#3F5A36\", highlightbackground=\"#3F5A36\",", "open_file = tk.Button(root, text=\"Open a File\", padx=30, pady=5, fg=\"white\", bg=\"#5C1010\", justify=\"center\", command=add_file) open_file.pack()", "\"file name + .extension\" def filename_from_path(file): file_split = file.split(\"/\") return file_split[-1] # Starts", "label.pack(side=\"top\", fill=\"x\", pady=10) B1 = tk.Button(popup, text=\"OK\", command=popup.destroy) B1.pack(pady=5, padx=5) popup.mainloop() # Opens", "in range(pages): page = my_pdf.getPage(num) my_text += page.extractText() global audiobook_name audiobook_name = filename_from_path(FILENAME[-1]).split(\".\")[0].title()", "= tk.Button(frame, text=\"Close File\", padx=20, pady=7, fg=\"white\", bg=\"black\", command=close) close_file.pack() def close(): for", "frame.winfo_children(): widget.destroy() filename = filedialog.askopenfilename(initialdir=\"/clear\", title=\"Select a File\", filetypes=((\"PDF files\", \"*.pdf\"), (\"All Files\",", 
"tkinter import * root = tk.Tk() root.title(\"Read Aloud: Turn Your PDF Files into", "= tk.Tk() root.title(\"Read Aloud: Turn Your PDF Files into Audiobooks [Stanford Code in", "open_pdf(file): pdf_var = pdf.ShowPdf() # Clears any previous PDF images before loading new", "gtts import gTTS, tts from playsound import playsound from tkinter import * root", "Opens PDF/EPUB file for viewing and displays its name + extension def add_file():", "page.extractText() global audiobook_name audiobook_name = filename_from_path(FILENAME[-1]).split(\".\")[0].title() + \" Audiobook.mp3\" audio_reader.save_to_file(my_text, audiobook_name) audio_reader.runAndWait() popup_msg(f\"Successfully", "file: my_pdf = PyPDF3.PdfFileReader(file) pages = my_pdf.numPages my_text = \"\" for num in", "PyPDF3 from tkinter.font import BOLD from tkinter import Canvas, filedialog from tkPDFViewer import", "height=800, width=800, bg=\"#3F5A36\", highlightbackground=\"#3F5A36\", highlightthickness=2) canvas.create_text(400, 45, text=\"Turn any PDF into an Audiobook\",", "if PDF_STATUS[-1]: audio_reader = pyttsx3.init() with open(FILENAME[-1], \"rb\") as file: my_pdf = PyPDF3.PdfFileReader(file)", "if PDF_STATUS[-1]: return True return False # Opens PDF/EPUB file for viewing and", "PDF_STATUS = [False] # Prevents duplicate Read Aloud Buttons READ_ALOUD_STATUS = [False] def", "PDF Status Indicators in a List of Booleans PDF_STATUS = [False] # Prevents", "tkPDFViewer import tkPDFViewer as pdf from gtts import gTTS, tts from playsound import", "File\", filetypes=((\"PDF files\", \"*.pdf\"), (\"All Files\", \"*.*\"))) close_button() label = tk.Label(frame, text=filename_from_path(filename)) label.pack()", "padx=30, pady=5, fg=\"white\", bg=\"#5C1010\", justify=\"center\", command=add_file) open_file.pack() # Returns \"file name + .extension\"", "frame.place(relwidth=0.8, relheight=0.8, relx=0.1, rely=0.1) # Holds string of one PDF file path FILENAME", "= tk.Tk() popup.title(\"Read Aloud\") 
popup.iconbitmap(\"ReadAloud_icon.ico\") popup.geometry(\"1000x100\") popup.config(bg=\"darkgreen\") label = tk.Label(popup, text=msg, font=\"Helvetica 18\")", "= tk.Button(root, text=\"Open a File\", padx=30, pady=5, fg=\"white\", bg=\"#5C1010\", justify=\"center\", command=add_file) open_file.pack() #", "Clears any previous PDF images before loading new file pdf_var.img_object_li.clear() set_pdf = pdf_var.pdf_view(frame,", "name + .extension\" def filename_from_path(file): file_split = file.split(\"/\") return file_split[-1] # Starts Text-to-Speech", "tts from playsound import playsound from tkinter import * root = tk.Tk() root.title(\"Read", "popup = tk.Tk() popup.title(\"Read Aloud\") popup.iconbitmap(\"ReadAloud_icon.ico\") popup.geometry(\"1000x100\") popup.config(bg=\"darkgreen\") label = tk.Label(popup, text=msg, font=\"Helvetica", "set_pdf = pdf_var.pdf_view(frame, file_location = file, width = 120, height = 120) set_pdf.pack()", "audiobook_name = filename_from_path(FILENAME[-1]).split(\".\")[0].title() + \" Audiobook.mp3\" audio_reader.save_to_file(my_text, audiobook_name) audio_reader.runAndWait() popup_msg(f\"Successfully generated MP3 file", "PDF file path FILENAME = [] # PDF Status Indicators in a List", "Process def generate_tts(): if PDF_STATUS[-1]: audio_reader = pyttsx3.init() with open(FILENAME[-1], \"rb\") as file:", "Text-to-Speech Process def generate_tts(): if PDF_STATUS[-1]: audio_reader = pyttsx3.init() with open(FILENAME[-1], \"rb\") as", "tkinter.font import BOLD from tkinter import Canvas, filedialog from tkPDFViewer import tkPDFViewer as", "# PDF Status Indicators in a List of Booleans PDF_STATUS = [False] #", "tk.Button(frame, text=\"Close File\", padx=20, pady=7, fg=\"white\", bg=\"black\", command=close) close_file.pack() def close(): for widget", "file_split = file.split(\"/\") return file_split[-1] # Starts Text-to-Speech Process def generate_tts(): if PDF_STATUS[-1]:", "in a List of Booleans PDF_STATUS = [False] # Prevents 
duplicate Read Aloud", "import os, pyttsx3, tkinter as tk import PyPDF3 from tkinter.font import BOLD from", "Turn Your PDF Files into Audiobooks [Stanford Code in Place 2021 Python Project", "fill=\"x\", pady=10) B1 = tk.Button(popup, text=\"OK\", command=popup.destroy) B1.pack(pady=5, padx=5) popup.mainloop() # Opens PDF", "bg=\"black\", command=close) close_file.pack() def close(): for widget in frame.winfo_children(): widget.destroy() FILENAME.clear() PDF_STATUS.append(False) root.mainloop()", "open(FILENAME[-1], \"rb\") as file: my_pdf = PyPDF3.PdfFileReader(file) pages = my_pdf.numPages my_text = \"\"", "Generates popup window after creating MP3 file def popup_msg(msg): popup = tk.Tk() popup.title(\"Read", "Audiobook\", padx=5, pady=12, fg=\"white\", bg=\"#4B1B5B\", justify=\"center\", command=generate_tts) read_aloud.pack() READ_ALOUD_STATUS.append(True) PDF_STATUS.append(True) def close_button(): close_file", "highlightthickness=2) canvas.create_text(400, 45, text=\"Turn any PDF into an Audiobook\", font=(\"Helvetica\", 21, BOLD), justify=\"center\",", "fg=\"white\", bg=\"#4B1B5B\", justify=\"center\", command=generate_tts) read_aloud.pack() READ_ALOUD_STATUS.append(True) PDF_STATUS.append(True) def close_button(): close_file = tk.Button(frame, text=\"Close", "PDF_STATUS[-1]: return True return False # Opens PDF/EPUB file for viewing and displays", "font=(\"Helvetica\", 21, BOLD), justify=\"center\", fill=\"white\") canvas.pack() frame = tk.Frame(root, bg=\"white\") frame.place(relwidth=0.8, relheight=0.8, relx=0.1,", "Place 2021 Python Project by <NAME>]\") root.iconbitmap(\"ReadAloud_icon.ico\") canvas = tk.Canvas(root, height=800, width=800, bg=\"#3F5A36\",", "text=\"Turn any PDF into an Audiobook\", font=(\"Helvetica\", 21, BOLD), justify=\"center\", fill=\"white\") canvas.pack() frame", "def add_file(): if not pdf_opened(): for widget in frame.winfo_children(): widget.destroy() filename = filedialog.askopenfilename(initialdir=\"/clear\",", 
"popup.mainloop() # Opens PDF in frame def open_pdf(file): pdf_var = pdf.ShowPdf() # Clears", "Booleans PDF_STATUS = [False] # Prevents duplicate Read Aloud Buttons READ_ALOUD_STATUS = [False]", "tk.Button(popup, text=\"OK\", command=popup.destroy) B1.pack(pady=5, padx=5) popup.mainloop() # Opens PDF in frame def open_pdf(file):", "import PyPDF3 from tkinter.font import BOLD from tkinter import Canvas, filedialog from tkPDFViewer", "audio_reader.save_to_file(my_text, audiobook_name) audio_reader.runAndWait() popup_msg(f\"Successfully generated MP3 file \\\"{audiobook_name}\\\"\") # Generates popup window after", "loading new file pdf_var.img_object_li.clear() set_pdf = pdf_var.pdf_view(frame, file_location = file, width = 120,", "audiobook_name) audio_reader.runAndWait() popup_msg(f\"Successfully generated MP3 file \\\"{audiobook_name}\\\"\") # Generates popup window after creating", "Project by <NAME>]\") root.iconbitmap(\"ReadAloud_icon.ico\") canvas = tk.Canvas(root, height=800, width=800, bg=\"#3F5A36\", highlightbackground=\"#3F5A36\", highlightthickness=2) canvas.create_text(400,", "= tk.Button(root, text=\"Generate Audiobook\", padx=5, pady=12, fg=\"white\", bg=\"#4B1B5B\", justify=\"center\", command=generate_tts) read_aloud.pack() READ_ALOUD_STATUS.append(True) PDF_STATUS.append(True)", "os, pyttsx3, tkinter as tk import PyPDF3 from tkinter.font import BOLD from tkinter", "Prevents duplicate Read Aloud Buttons READ_ALOUD_STATUS = [False] def pdf_opened(): if PDF_STATUS[-1]: return", "audio_reader = pyttsx3.init() with open(FILENAME[-1], \"rb\") as file: my_pdf = PyPDF3.PdfFileReader(file) pages =", "from tkPDFViewer import tkPDFViewer as pdf from gtts import gTTS, tts from playsound", "120) set_pdf.pack() # Creates read aloud button if not READ_ALOUD_STATUS[-1]: read_aloud = tk.Button(root," ]
[ "C++ package first') # ignore libraries already in python/treelite; only use ones in", "traceback): shutil.rmtree(self.name) class BinaryDistribution(Distribution): \"\"\"Overrides Distribution class to bundle platform-specific binaries\"\"\" # pylint:", "libraries LIB_PATH = LIBPATH['find_lib_path'](basename='treelite') RT_PATH = LIBPATH['find_lib_path'](basename='treelite_runtime') if (not LIB_PATH) or (not RT_PATH)", "libraries already in python/treelite; only use ones in ../lib if os.path.abspath(os.path.dirname(LIB_PATH[0])) == os.path.abspath('./treelite'):", "os.path.exists(LIB_DEST): os.remove(LIB_DEST) if os.path.exists(RT_DEST): os.remove(RT_DEST) shutil.copy(LIB_PATH[0], LIB_DEST) shutil.copy(RT_PATH[0], RT_DEST) # copy treelite.runtime PY_RT_SRC", "print('{}'.format(VERSION), file=f2) # Create a zipped package containing glue code for deployment with", "BinaryDistribution(Distribution): \"\"\"Overrides Distribution class to bundle platform-specific binaries\"\"\" # pylint: disable=R0201 def has_ext_modules(self):", "os.remove(LIB_DEST) if os.path.exists(RT_DEST): os.remove(RT_DEST) shutil.copy(LIB_PATH[0], LIB_DEST) shutil.copy(RT_PATH[0], RT_DEST) # copy treelite.runtime PY_RT_SRC =", "shutil.copytree('../runtime/native/', os.path.abspath(os.path.join(tempdir, 'runtime'))) libpath = os.path.abspath(os.path.join(tempdir, 'runtime', 'lib')) filelist = os.path.abspath(os.path.join(tempdir, 'runtime', 'FILELIST'))", "LIB_BASENAME = os.path.basename(LIB_PATH[0]) LIB_DEST = os.path.join('./treelite', LIB_BASENAME) RT_BASENAME = os.path.basename(RT_PATH[0]) RT_DEST = os.path.join('./treelite',", "C/C++ libraries LIB_PATH = LIBPATH['find_lib_path'](basename='treelite') RT_PATH = LIBPATH['find_lib_path'](basename='treelite_runtime') if (not LIB_PATH) or (not", "= os.path.abspath(os.path.join(tempdir, 'runtime', 'lib')) filelist = os.path.abspath(os.path.join(tempdir, 'runtime', 'FILELIST')) if os.path.exists(libpath): # remove", "= tempfile.mkdtemp() # 
pylint: disable=W0201 return self.name def __exit__(self, exc_type, exc_value, traceback): shutil.rmtree(self.name)", "= os.path.basename(LIB_PATH[0]) LIB_DEST = os.path.join('./treelite', LIB_BASENAME) RT_BASENAME = os.path.basename(RT_PATH[0]) RT_DEST = os.path.join('./treelite', RT_BASENAME)", "RT_DEST = os.path.join('./treelite', RT_BASENAME) # remove stale copies of library if os.path.exists(LIB_DEST): os.remove(LIB_DEST)", "os.path.exists(filelist): os.remove(filelist) shutil.make_archive(base_name='./treelite/treelite_runtime', format='zip', root_dir=os.path.abspath(tempdir), base_dir='runtime/') setup( name='treelite', version=VERSION, description='treelite: toolbox for decision", "# ignore libraries already in python/treelite; only use ones in ../lib if os.path.abspath(os.path.dirname(LIB_PATH[0]))", "exc_type, exc_value, traceback): shutil.rmtree(self.name) class BinaryDistribution(Distribution): \"\"\"Overrides Distribution class to bundle platform-specific binaries\"\"\"", "file=f2) # Create a zipped package containing glue code for deployment with TemporaryDirectory()", "os.remove(filelist) shutil.make_archive(base_name='./treelite/treelite_runtime', format='zip', root_dir=os.path.abspath(tempdir), base_dir='runtime/') setup( name='treelite', version=VERSION, description='treelite: toolbox for decision tree", "version=VERSION, description='treelite: toolbox for decision tree deployment', url='http://treelite.io', author='DMLC', maintainer='<NAME>', maintainer_email='<EMAIL>', packages=find_packages(), install_requires=['numpy',", "'../runtime/native/python/treelite_runtime' PY_RT_DEST = './treelite/runtime/treelite_runtime' if os.path.exists(PY_RT_DEST): shutil.rmtree(PY_RT_DEST) shutil.copytree(PY_RT_SRC, PY_RT_DEST) with open('../VERSION', 'r') as", "description='treelite: toolbox for decision tree deployment', url='http://treelite.io', author='DMLC', maintainer='<NAME>', maintainer_email='<EMAIL>', packages=find_packages(), 
install_requires=['numpy', 'scipy'],", "setup, Distribution, find_packages class TemporaryDirectory(object): \"\"\"Context manager for tempfile.mkdtemp()\"\"\" # pylint: disable=R0903 def", "ignore libraries already in python/treelite; only use ones in ../lib if os.path.abspath(os.path.dirname(LIB_PATH[0])) ==", "= os.path.join('./treelite', RT_BASENAME) # remove stale copies of library if os.path.exists(LIB_DEST): os.remove(LIB_DEST) if", "LIBPATH_PY, 'exec'), LIBPATH, LIBPATH) # Paths for C/C++ libraries LIB_PATH = LIBPATH['find_lib_path'](basename='treelite') RT_PATH", "os.path.exists(RT_DEST): os.remove(RT_DEST) shutil.copy(LIB_PATH[0], LIB_DEST) shutil.copy(RT_PATH[0], RT_DEST) # copy treelite.runtime PY_RT_SRC = '../runtime/native/python/treelite_runtime' PY_RT_DEST", "'./treelite/runtime/treelite_runtime' if os.path.exists(PY_RT_DEST): shutil.rmtree(PY_RT_DEST) shutil.copytree(PY_RT_SRC, PY_RT_DEST) with open('../VERSION', 'r') as f: VERSION =", "format='zip', root_dir=os.path.abspath(tempdir), base_dir='runtime/') setup( name='treelite', version=VERSION, description='treelite: toolbox for decision tree deployment', url='http://treelite.io',", "remove stale copies of library if os.path.exists(LIB_DEST): os.remove(LIB_DEST) if os.path.exists(RT_DEST): os.remove(RT_DEST) shutil.copy(LIB_PATH[0], LIB_DEST)", "os.path.exists(PY_RT_DEST): shutil.rmtree(PY_RT_DEST) shutil.copytree(PY_RT_SRC, PY_RT_DEST) with open('../VERSION', 'r') as f: VERSION = f.readlines()[0].rstrip('\\n') with", "LIB_PATH = LIBPATH['find_lib_path'](basename='treelite') RT_PATH = LIBPATH['find_lib_path'](basename='treelite_runtime') if (not LIB_PATH) or (not RT_PATH) or", "def __enter__(self): self.name = tempfile.mkdtemp() # pylint: disable=W0201 return self.name def __exit__(self, exc_type,", "for decision tree deployment', url='http://treelite.io', author='DMLC', maintainer='<NAME>', maintainer_email='<EMAIL>', packages=find_packages(), install_requires=['numpy', 'scipy'], package_data={ 
'treelite':", "setuptools import setup, Distribution, find_packages class TemporaryDirectory(object): \"\"\"Context manager for tempfile.mkdtemp()\"\"\" # pylint:", "exec(compile(open(LIBPATH_PY, \"rb\").read(), LIBPATH_PY, 'exec'), LIBPATH, LIBPATH) # Paths for C/C++ libraries LIB_PATH =", "module\"\"\" return True LIBPATH_PY = os.path.abspath('./treelite/libpath.py') LIBPATH = {'__file__': LIBPATH_PY} # pylint: disable=W0122", "shutil import tempfile from setuptools import setup, Distribution, find_packages class TemporaryDirectory(object): \"\"\"Context manager", "= '../runtime/native/python/treelite_runtime' PY_RT_DEST = './treelite/runtime/treelite_runtime' if os.path.exists(PY_RT_DEST): shutil.rmtree(PY_RT_DEST) shutil.copytree(PY_RT_SRC, PY_RT_DEST) with open('../VERSION', 'r')", "from __future__ import print_function import os import shutil import tempfile from setuptools import", "return self.name def __exit__(self, exc_type, exc_value, traceback): shutil.rmtree(self.name) class BinaryDistribution(Distribution): \"\"\"Overrides Distribution class", "exc_value, traceback): shutil.rmtree(self.name) class BinaryDistribution(Distribution): \"\"\"Overrides Distribution class to bundle platform-specific binaries\"\"\" #", "if os.path.exists(RT_DEST): os.remove(RT_DEST) shutil.copy(LIB_PATH[0], LIB_DEST) shutil.copy(RT_PATH[0], RT_DEST) # copy treelite.runtime PY_RT_SRC = '../runtime/native/python/treelite_runtime'", "open('./treelite/VERSION', 'w') as f2: print('{}'.format(VERSION), file=f2) # Create a zipped package containing glue", "as f2: print('{}'.format(VERSION), file=f2) # Create a zipped package containing glue code for", "copies of library if os.path.exists(LIB_DEST): os.remove(LIB_DEST) if os.path.exists(RT_DEST): os.remove(RT_DEST) shutil.copy(LIB_PATH[0], LIB_DEST) shutil.copy(RT_PATH[0], RT_DEST)", "or (not RT_PATH) or (not os.path.isdir('../build/runtime')): raise RuntimeError('Please compile the C++ package first')", "= 
LIBPATH['find_lib_path'](basename='treelite') RT_PATH = LIBPATH['find_lib_path'](basename='treelite_runtime') if (not LIB_PATH) or (not RT_PATH) or (not", "os.path.abspath(os.path.join(tempdir, 'runtime', 'lib')) filelist = os.path.abspath(os.path.join(tempdir, 'runtime', 'FILELIST')) if os.path.exists(libpath): # remove compiled", "the C++ package first') # ignore libraries already in python/treelite; only use ones", "shutil.copy(RT_PATH[0], RT_DEST) # copy treelite.runtime PY_RT_SRC = '../runtime/native/python/treelite_runtime' PY_RT_DEST = './treelite/runtime/treelite_runtime' if os.path.exists(PY_RT_DEST):", "root_dir=os.path.abspath(tempdir), base_dir='runtime/') setup( name='treelite', version=VERSION, description='treelite: toolbox for decision tree deployment', url='http://treelite.io', author='DMLC',", "../lib if os.path.abspath(os.path.dirname(LIB_PATH[0])) == os.path.abspath('./treelite'): del LIB_PATH[0] del RT_PATH[0] LIB_BASENAME = os.path.basename(LIB_PATH[0]) LIB_DEST", "stale copies of library if os.path.exists(LIB_DEST): os.remove(LIB_DEST) if os.path.exists(RT_DEST): os.remove(RT_DEST) shutil.copy(LIB_PATH[0], LIB_DEST) shutil.copy(RT_PATH[0],", "self.name = tempfile.mkdtemp() # pylint: disable=W0201 return self.name def __exit__(self, exc_type, exc_value, traceback):", "<gh_stars>1-10 # coding: utf-8 \"\"\"Setup script\"\"\" from __future__ import print_function import os import", "pylint: disable=W0201 return self.name def __exit__(self, exc_type, exc_value, traceback): shutil.rmtree(self.name) class BinaryDistribution(Distribution): \"\"\"Overrides", "of library if os.path.exists(LIB_DEST): os.remove(LIB_DEST) if os.path.exists(RT_DEST): os.remove(RT_DEST) shutil.copy(LIB_PATH[0], LIB_DEST) shutil.copy(RT_PATH[0], RT_DEST) #", "compiled lib shutil.rmtree(libpath) if os.path.exists(filelist): os.remove(filelist) shutil.make_archive(base_name='./treelite/treelite_runtime', format='zip', root_dir=os.path.abspath(tempdir), base_dir='runtime/') 
setup( name='treelite', version=VERSION,", "'runtime', 'FILELIST')) if os.path.exists(libpath): # remove compiled lib shutil.rmtree(libpath) if os.path.exists(filelist): os.remove(filelist) shutil.make_archive(base_name='./treelite/treelite_runtime',", "LIBPATH = {'__file__': LIBPATH_PY} # pylint: disable=W0122 exec(compile(open(LIBPATH_PY, \"rb\").read(), LIBPATH_PY, 'exec'), LIBPATH, LIBPATH)", "Distribution class to bundle platform-specific binaries\"\"\" # pylint: disable=R0201 def has_ext_modules(self): \"\"\"Has an", "return True LIBPATH_PY = os.path.abspath('./treelite/libpath.py') LIBPATH = {'__file__': LIBPATH_PY} # pylint: disable=W0122 exec(compile(open(LIBPATH_PY,", "print_function import os import shutil import tempfile from setuptools import setup, Distribution, find_packages", "= os.path.basename(RT_PATH[0]) RT_DEST = os.path.join('./treelite', RT_BASENAME) # remove stale copies of library if", "shutil.make_archive(base_name='./treelite/treelite_runtime', format='zip', root_dir=os.path.abspath(tempdir), base_dir='runtime/') setup( name='treelite', version=VERSION, description='treelite: toolbox for decision tree deployment',", "import shutil import tempfile from setuptools import setup, Distribution, find_packages class TemporaryDirectory(object): \"\"\"Context", "already in python/treelite; only use ones in ../lib if os.path.abspath(os.path.dirname(LIB_PATH[0])) == os.path.abspath('./treelite'): del", "from setuptools import setup, Distribution, find_packages class TemporaryDirectory(object): \"\"\"Context manager for tempfile.mkdtemp()\"\"\" #", "= os.path.abspath('./treelite/libpath.py') LIBPATH = {'__file__': LIBPATH_PY} # pylint: disable=W0122 exec(compile(open(LIBPATH_PY, \"rb\").read(), LIBPATH_PY, 'exec'),", "__enter__(self): self.name = tempfile.mkdtemp() # pylint: disable=W0201 return self.name def __exit__(self, exc_type, exc_value,", "if os.path.exists(libpath): # remove compiled lib shutil.rmtree(libpath) if os.path.exists(filelist): 
os.remove(filelist) shutil.make_archive(base_name='./treelite/treelite_runtime', format='zip', root_dir=os.path.abspath(tempdir),", "coding: utf-8 \"\"\"Setup script\"\"\" from __future__ import print_function import os import shutil import", "os.path.join('./treelite', LIB_BASENAME) RT_BASENAME = os.path.basename(RT_PATH[0]) RT_DEST = os.path.join('./treelite', RT_BASENAME) # remove stale copies", "f: VERSION = f.readlines()[0].rstrip('\\n') with open('./treelite/VERSION', 'w') as f2: print('{}'.format(VERSION), file=f2) # Create", "shutil.rmtree(self.name) class BinaryDistribution(Distribution): \"\"\"Overrides Distribution class to bundle platform-specific binaries\"\"\" # pylint: disable=R0201", "'w') as f2: print('{}'.format(VERSION), file=f2) # Create a zipped package containing glue code", "RT_BASENAME) # remove stale copies of library if os.path.exists(LIB_DEST): os.remove(LIB_DEST) if os.path.exists(RT_DEST): os.remove(RT_DEST)", "with open('./treelite/VERSION', 'w') as f2: print('{}'.format(VERSION), file=f2) # Create a zipped package containing", "copy treelite.runtime PY_RT_SRC = '../runtime/native/python/treelite_runtime' PY_RT_DEST = './treelite/runtime/treelite_runtime' if os.path.exists(PY_RT_DEST): shutil.rmtree(PY_RT_DEST) shutil.copytree(PY_RT_SRC, PY_RT_DEST)", "remove compiled lib shutil.rmtree(libpath) if os.path.exists(filelist): os.remove(filelist) shutil.make_archive(base_name='./treelite/treelite_runtime', format='zip', root_dir=os.path.abspath(tempdir), base_dir='runtime/') setup( name='treelite',", "disable=R0903 def __enter__(self): self.name = tempfile.mkdtemp() # pylint: disable=W0201 return self.name def __exit__(self,", "# remove compiled lib shutil.rmtree(libpath) if os.path.exists(filelist): os.remove(filelist) shutil.make_archive(base_name='./treelite/treelite_runtime', format='zip', root_dir=os.path.abspath(tempdir), base_dir='runtime/') setup(", "import tempfile from setuptools import setup, Distribution, find_packages class 
TemporaryDirectory(object): \"\"\"Context manager for", "(not RT_PATH) or (not os.path.isdir('../build/runtime')): raise RuntimeError('Please compile the C++ package first') #", "== os.path.abspath('./treelite'): del LIB_PATH[0] del RT_PATH[0] LIB_BASENAME = os.path.basename(LIB_PATH[0]) LIB_DEST = os.path.join('./treelite', LIB_BASENAME)", "lib shutil.rmtree(libpath) if os.path.exists(filelist): os.remove(filelist) shutil.make_archive(base_name='./treelite/treelite_runtime', format='zip', root_dir=os.path.abspath(tempdir), base_dir='runtime/') setup( name='treelite', version=VERSION, description='treelite:", "class TemporaryDirectory(object): \"\"\"Context manager for tempfile.mkdtemp()\"\"\" # pylint: disable=R0903 def __enter__(self): self.name =", "'exec'), LIBPATH, LIBPATH) # Paths for C/C++ libraries LIB_PATH = LIBPATH['find_lib_path'](basename='treelite') RT_PATH =", "os.path.abspath(os.path.join(tempdir, 'runtime'))) libpath = os.path.abspath(os.path.join(tempdir, 'runtime', 'lib')) filelist = os.path.abspath(os.path.join(tempdir, 'runtime', 'FILELIST')) if", "'FILELIST')) if os.path.exists(libpath): # remove compiled lib shutil.rmtree(libpath) if os.path.exists(filelist): os.remove(filelist) shutil.make_archive(base_name='./treelite/treelite_runtime', format='zip',", "manager for tempfile.mkdtemp()\"\"\" # pylint: disable=R0903 def __enter__(self): self.name = tempfile.mkdtemp() # pylint:", "disable=W0201 return self.name def __exit__(self, exc_type, exc_value, traceback): shutil.rmtree(self.name) class BinaryDistribution(Distribution): \"\"\"Overrides Distribution", "os.remove(RT_DEST) shutil.copy(LIB_PATH[0], LIB_DEST) shutil.copy(RT_PATH[0], RT_DEST) # copy treelite.runtime PY_RT_SRC = '../runtime/native/python/treelite_runtime' PY_RT_DEST =", "True LIBPATH_PY = os.path.abspath('./treelite/libpath.py') LIBPATH = {'__file__': LIBPATH_PY} # pylint: disable=W0122 exec(compile(open(LIBPATH_PY, \"rb\").read(),", "deployment', url='http://treelite.io', 
author='DMLC', maintainer='<NAME>', maintainer_email='<EMAIL>', packages=find_packages(), install_requires=['numpy', 'scipy'], package_data={ 'treelite': [LIB_BASENAME, RT_BASENAME, 'treelite_runtime.zip',", "= os.path.abspath(os.path.join(tempdir, 'runtime', 'FILELIST')) if os.path.exists(libpath): # remove compiled lib shutil.rmtree(libpath) if os.path.exists(filelist):", "# copy treelite.runtime PY_RT_SRC = '../runtime/native/python/treelite_runtime' PY_RT_DEST = './treelite/runtime/treelite_runtime' if os.path.exists(PY_RT_DEST): shutil.rmtree(PY_RT_DEST) shutil.copytree(PY_RT_SRC,", "os.path.abspath('./treelite/libpath.py') LIBPATH = {'__file__': LIBPATH_PY} # pylint: disable=W0122 exec(compile(open(LIBPATH_PY, \"rb\").read(), LIBPATH_PY, 'exec'), LIBPATH,", "name='treelite', version=VERSION, description='treelite: toolbox for decision tree deployment', url='http://treelite.io', author='DMLC', maintainer='<NAME>', maintainer_email='<EMAIL>', packages=find_packages(),", "TemporaryDirectory() as tempdir: shutil.copytree('../runtime/native/', os.path.abspath(os.path.join(tempdir, 'runtime'))) libpath = os.path.abspath(os.path.join(tempdir, 'runtime', 'lib')) filelist =", "\"\"\"Has an extension module\"\"\" return True LIBPATH_PY = os.path.abspath('./treelite/libpath.py') LIBPATH = {'__file__': LIBPATH_PY}", "tempdir: shutil.copytree('../runtime/native/', os.path.abspath(os.path.join(tempdir, 'runtime'))) libpath = os.path.abspath(os.path.join(tempdir, 'runtime', 'lib')) filelist = os.path.abspath(os.path.join(tempdir, 'runtime',", "def has_ext_modules(self): \"\"\"Has an extension module\"\"\" return True LIBPATH_PY = os.path.abspath('./treelite/libpath.py') LIBPATH =", "if (not LIB_PATH) or (not RT_PATH) or (not os.path.isdir('../build/runtime')): raise RuntimeError('Please compile the", "RT_DEST) # copy treelite.runtime PY_RT_SRC = '../runtime/native/python/treelite_runtime' PY_RT_DEST = './treelite/runtime/treelite_runtime' if os.path.exists(PY_RT_DEST): 
shutil.rmtree(PY_RT_DEST)", "in python/treelite; only use ones in ../lib if os.path.abspath(os.path.dirname(LIB_PATH[0])) == os.path.abspath('./treelite'): del LIB_PATH[0]", "# pylint: disable=R0903 def __enter__(self): self.name = tempfile.mkdtemp() # pylint: disable=W0201 return self.name", "if os.path.exists(PY_RT_DEST): shutil.rmtree(PY_RT_DEST) shutil.copytree(PY_RT_SRC, PY_RT_DEST) with open('../VERSION', 'r') as f: VERSION = f.readlines()[0].rstrip('\\n')", "decision tree deployment', url='http://treelite.io', author='DMLC', maintainer='<NAME>', maintainer_email='<EMAIL>', packages=find_packages(), install_requires=['numpy', 'scipy'], package_data={ 'treelite': [LIB_BASENAME,", "LIB_DEST) shutil.copy(RT_PATH[0], RT_DEST) # copy treelite.runtime PY_RT_SRC = '../runtime/native/python/treelite_runtime' PY_RT_DEST = './treelite/runtime/treelite_runtime' if", "package first') # ignore libraries already in python/treelite; only use ones in ../lib", "os.path.basename(RT_PATH[0]) RT_DEST = os.path.join('./treelite', RT_BASENAME) # remove stale copies of library if os.path.exists(LIB_DEST):", "in ../lib if os.path.abspath(os.path.dirname(LIB_PATH[0])) == os.path.abspath('./treelite'): del LIB_PATH[0] del RT_PATH[0] LIB_BASENAME = os.path.basename(LIB_PATH[0])", "with open('../VERSION', 'r') as f: VERSION = f.readlines()[0].rstrip('\\n') with open('./treelite/VERSION', 'w') as f2:", "del LIB_PATH[0] del RT_PATH[0] LIB_BASENAME = os.path.basename(LIB_PATH[0]) LIB_DEST = os.path.join('./treelite', LIB_BASENAME) RT_BASENAME =", "if os.path.exists(filelist): os.remove(filelist) shutil.make_archive(base_name='./treelite/treelite_runtime', format='zip', root_dir=os.path.abspath(tempdir), base_dir='runtime/') setup( name='treelite', version=VERSION, description='treelite: toolbox for", "LIBPATH['find_lib_path'](basename='treelite_runtime') if (not LIB_PATH) or (not RT_PATH) or (not os.path.isdir('../build/runtime')): raise RuntimeError('Please compile", "import setup, 
Distribution, find_packages class TemporaryDirectory(object): \"\"\"Context manager for tempfile.mkdtemp()\"\"\" # pylint: disable=R0903", "tempfile from setuptools import setup, Distribution, find_packages class TemporaryDirectory(object): \"\"\"Context manager for tempfile.mkdtemp()\"\"\"", "binaries\"\"\" # pylint: disable=R0201 def has_ext_modules(self): \"\"\"Has an extension module\"\"\" return True LIBPATH_PY", "# pylint: disable=R0201 def has_ext_modules(self): \"\"\"Has an extension module\"\"\" return True LIBPATH_PY =", "glue code for deployment with TemporaryDirectory() as tempdir: shutil.copytree('../runtime/native/', os.path.abspath(os.path.join(tempdir, 'runtime'))) libpath =", "Distribution, find_packages class TemporaryDirectory(object): \"\"\"Context manager for tempfile.mkdtemp()\"\"\" # pylint: disable=R0903 def __enter__(self):", "toolbox for decision tree deployment', url='http://treelite.io', author='DMLC', maintainer='<NAME>', maintainer_email='<EMAIL>', packages=find_packages(), install_requires=['numpy', 'scipy'], package_data={", "os.path.isdir('../build/runtime')): raise RuntimeError('Please compile the C++ package first') # ignore libraries already in", "a zipped package containing glue code for deployment with TemporaryDirectory() as tempdir: shutil.copytree('../runtime/native/',", "use ones in ../lib if os.path.abspath(os.path.dirname(LIB_PATH[0])) == os.path.abspath('./treelite'): del LIB_PATH[0] del RT_PATH[0] LIB_BASENAME", "find_packages class TemporaryDirectory(object): \"\"\"Context manager for tempfile.mkdtemp()\"\"\" # pylint: disable=R0903 def __enter__(self): self.name", "RT_PATH = LIBPATH['find_lib_path'](basename='treelite_runtime') if (not LIB_PATH) or (not RT_PATH) or (not os.path.isdir('../build/runtime')): raise", "Paths for C/C++ libraries LIB_PATH = LIBPATH['find_lib_path'](basename='treelite') RT_PATH = LIBPATH['find_lib_path'](basename='treelite_runtime') if (not LIB_PATH)", "script\"\"\" from __future__ import 
print_function import os import shutil import tempfile from setuptools", "\"\"\"Overrides Distribution class to bundle platform-specific binaries\"\"\" # pylint: disable=R0201 def has_ext_modules(self): \"\"\"Has", "code for deployment with TemporaryDirectory() as tempdir: shutil.copytree('../runtime/native/', os.path.abspath(os.path.join(tempdir, 'runtime'))) libpath = os.path.abspath(os.path.join(tempdir,", "tempfile.mkdtemp() # pylint: disable=W0201 return self.name def __exit__(self, exc_type, exc_value, traceback): shutil.rmtree(self.name) class", "shutil.copy(LIB_PATH[0], LIB_DEST) shutil.copy(RT_PATH[0], RT_DEST) # copy treelite.runtime PY_RT_SRC = '../runtime/native/python/treelite_runtime' PY_RT_DEST = './treelite/runtime/treelite_runtime'", "compile the C++ package first') # ignore libraries already in python/treelite; only use", "an extension module\"\"\" return True LIBPATH_PY = os.path.abspath('./treelite/libpath.py') LIBPATH = {'__file__': LIBPATH_PY} #", "TemporaryDirectory(object): \"\"\"Context manager for tempfile.mkdtemp()\"\"\" # pylint: disable=R0903 def __enter__(self): self.name = tempfile.mkdtemp()", "for C/C++ libraries LIB_PATH = LIBPATH['find_lib_path'](basename='treelite') RT_PATH = LIBPATH['find_lib_path'](basename='treelite_runtime') if (not LIB_PATH) or", "ones in ../lib if os.path.abspath(os.path.dirname(LIB_PATH[0])) == os.path.abspath('./treelite'): del LIB_PATH[0] del RT_PATH[0] LIB_BASENAME =", "setup( name='treelite', version=VERSION, description='treelite: toolbox for decision tree deployment', url='http://treelite.io', author='DMLC', maintainer='<NAME>', maintainer_email='<EMAIL>',", "= f.readlines()[0].rstrip('\\n') with open('./treelite/VERSION', 'w') as f2: print('{}'.format(VERSION), file=f2) # Create a zipped", "bundle platform-specific binaries\"\"\" # pylint: disable=R0201 def has_ext_modules(self): \"\"\"Has an extension module\"\"\" return", "os.path.abspath('./treelite'): del LIB_PATH[0] del RT_PATH[0] LIB_BASENAME = 
os.path.basename(LIB_PATH[0]) LIB_DEST = os.path.join('./treelite', LIB_BASENAME) RT_BASENAME", "def __exit__(self, exc_type, exc_value, traceback): shutil.rmtree(self.name) class BinaryDistribution(Distribution): \"\"\"Overrides Distribution class to bundle", "has_ext_modules(self): \"\"\"Has an extension module\"\"\" return True LIBPATH_PY = os.path.abspath('./treelite/libpath.py') LIBPATH = {'__file__':", "RT_PATH) or (not os.path.isdir('../build/runtime')): raise RuntimeError('Please compile the C++ package first') # ignore", "# coding: utf-8 \"\"\"Setup script\"\"\" from __future__ import print_function import os import shutil", "LIBPATH['find_lib_path'](basename='treelite') RT_PATH = LIBPATH['find_lib_path'](basename='treelite_runtime') if (not LIB_PATH) or (not RT_PATH) or (not os.path.isdir('../build/runtime')):", "treelite.runtime PY_RT_SRC = '../runtime/native/python/treelite_runtime' PY_RT_DEST = './treelite/runtime/treelite_runtime' if os.path.exists(PY_RT_DEST): shutil.rmtree(PY_RT_DEST) shutil.copytree(PY_RT_SRC, PY_RT_DEST) with", "LIBPATH, LIBPATH) # Paths for C/C++ libraries LIB_PATH = LIBPATH['find_lib_path'](basename='treelite') RT_PATH = LIBPATH['find_lib_path'](basename='treelite_runtime')", "libpath = os.path.abspath(os.path.join(tempdir, 'runtime', 'lib')) filelist = os.path.abspath(os.path.join(tempdir, 'runtime', 'FILELIST')) if os.path.exists(libpath): #", "maintainer_email='<EMAIL>', packages=find_packages(), install_requires=['numpy', 'scipy'], package_data={ 'treelite': [LIB_BASENAME, RT_BASENAME, 'treelite_runtime.zip', 'VERSION'] }, distclass=BinaryDistribution )", "class BinaryDistribution(Distribution): \"\"\"Overrides Distribution class to bundle platform-specific binaries\"\"\" # pylint: disable=R0201 def", "# remove stale copies of library if os.path.exists(LIB_DEST): os.remove(LIB_DEST) if os.path.exists(RT_DEST): os.remove(RT_DEST) shutil.copy(LIB_PATH[0],", "if os.path.abspath(os.path.dirname(LIB_PATH[0])) == 
os.path.abspath('./treelite'): del LIB_PATH[0] del RT_PATH[0] LIB_BASENAME = os.path.basename(LIB_PATH[0]) LIB_DEST =", "# pylint: disable=W0122 exec(compile(open(LIBPATH_PY, \"rb\").read(), LIBPATH_PY, 'exec'), LIBPATH, LIBPATH) # Paths for C/C++", "RT_PATH[0] LIB_BASENAME = os.path.basename(LIB_PATH[0]) LIB_DEST = os.path.join('./treelite', LIB_BASENAME) RT_BASENAME = os.path.basename(RT_PATH[0]) RT_DEST =", "# Create a zipped package containing glue code for deployment with TemporaryDirectory() as", "= os.path.join('./treelite', LIB_BASENAME) RT_BASENAME = os.path.basename(RT_PATH[0]) RT_DEST = os.path.join('./treelite', RT_BASENAME) # remove stale", "import os import shutil import tempfile from setuptools import setup, Distribution, find_packages class", "author='DMLC', maintainer='<NAME>', maintainer_email='<EMAIL>', packages=find_packages(), install_requires=['numpy', 'scipy'], package_data={ 'treelite': [LIB_BASENAME, RT_BASENAME, 'treelite_runtime.zip', 'VERSION'] },", "\"\"\"Context manager for tempfile.mkdtemp()\"\"\" # pylint: disable=R0903 def __enter__(self): self.name = tempfile.mkdtemp() #", "filelist = os.path.abspath(os.path.join(tempdir, 'runtime', 'FILELIST')) if os.path.exists(libpath): # remove compiled lib shutil.rmtree(libpath) if", "pylint: disable=W0122 exec(compile(open(LIBPATH_PY, \"rb\").read(), LIBPATH_PY, 'exec'), LIBPATH, LIBPATH) # Paths for C/C++ libraries", "package containing glue code for deployment with TemporaryDirectory() as tempdir: shutil.copytree('../runtime/native/', os.path.abspath(os.path.join(tempdir, 'runtime')))", "utf-8 \"\"\"Setup script\"\"\" from __future__ import print_function import os import shutil import tempfile", "python/treelite; only use ones in ../lib if os.path.abspath(os.path.dirname(LIB_PATH[0])) == os.path.abspath('./treelite'): del LIB_PATH[0] del", "LIBPATH_PY = os.path.abspath('./treelite/libpath.py') LIBPATH = {'__file__': LIBPATH_PY} # pylint: disable=W0122 exec(compile(open(LIBPATH_PY, 
\"rb\").read(), LIBPATH_PY,", "os.path.abspath(os.path.join(tempdir, 'runtime', 'FILELIST')) if os.path.exists(libpath): # remove compiled lib shutil.rmtree(libpath) if os.path.exists(filelist): os.remove(filelist)", "(not LIB_PATH) or (not RT_PATH) or (not os.path.isdir('../build/runtime')): raise RuntimeError('Please compile the C++", "PY_RT_SRC = '../runtime/native/python/treelite_runtime' PY_RT_DEST = './treelite/runtime/treelite_runtime' if os.path.exists(PY_RT_DEST): shutil.rmtree(PY_RT_DEST) shutil.copytree(PY_RT_SRC, PY_RT_DEST) with open('../VERSION',", "{'__file__': LIBPATH_PY} # pylint: disable=W0122 exec(compile(open(LIBPATH_PY, \"rb\").read(), LIBPATH_PY, 'exec'), LIBPATH, LIBPATH) # Paths", "zipped package containing glue code for deployment with TemporaryDirectory() as tempdir: shutil.copytree('../runtime/native/', os.path.abspath(os.path.join(tempdir,", "os.path.exists(libpath): # remove compiled lib shutil.rmtree(libpath) if os.path.exists(filelist): os.remove(filelist) shutil.make_archive(base_name='./treelite/treelite_runtime', format='zip', root_dir=os.path.abspath(tempdir), base_dir='runtime/')", "tempfile.mkdtemp()\"\"\" # pylint: disable=R0903 def __enter__(self): self.name = tempfile.mkdtemp() # pylint: disable=W0201 return", "pylint: disable=R0903 def __enter__(self): self.name = tempfile.mkdtemp() # pylint: disable=W0201 return self.name def", "LIBPATH_PY} # pylint: disable=W0122 exec(compile(open(LIBPATH_PY, \"rb\").read(), LIBPATH_PY, 'exec'), LIBPATH, LIBPATH) # Paths for", "os.path.abspath(os.path.dirname(LIB_PATH[0])) == os.path.abspath('./treelite'): del LIB_PATH[0] del RT_PATH[0] LIB_BASENAME = os.path.basename(LIB_PATH[0]) LIB_DEST = os.path.join('./treelite',", "VERSION = f.readlines()[0].rstrip('\\n') with open('./treelite/VERSION', 'w') as f2: print('{}'.format(VERSION), file=f2) # Create a", "tree deployment', url='http://treelite.io', author='DMLC', maintainer='<NAME>', maintainer_email='<EMAIL>', packages=find_packages(), 
install_requires=['numpy', 'scipy'], package_data={ 'treelite': [LIB_BASENAME, RT_BASENAME,", "shutil.rmtree(PY_RT_DEST) shutil.copytree(PY_RT_SRC, PY_RT_DEST) with open('../VERSION', 'r') as f: VERSION = f.readlines()[0].rstrip('\\n') with open('./treelite/VERSION',", "to bundle platform-specific binaries\"\"\" # pylint: disable=R0201 def has_ext_modules(self): \"\"\"Has an extension module\"\"\"", "LIB_DEST = os.path.join('./treelite', LIB_BASENAME) RT_BASENAME = os.path.basename(RT_PATH[0]) RT_DEST = os.path.join('./treelite', RT_BASENAME) # remove", "__future__ import print_function import os import shutil import tempfile from setuptools import setup,", "extension module\"\"\" return True LIBPATH_PY = os.path.abspath('./treelite/libpath.py') LIBPATH = {'__file__': LIBPATH_PY} # pylint:", "\"rb\").read(), LIBPATH_PY, 'exec'), LIBPATH, LIBPATH) # Paths for C/C++ libraries LIB_PATH = LIBPATH['find_lib_path'](basename='treelite')", "for tempfile.mkdtemp()\"\"\" # pylint: disable=R0903 def __enter__(self): self.name = tempfile.mkdtemp() # pylint: disable=W0201", "# Paths for C/C++ libraries LIB_PATH = LIBPATH['find_lib_path'](basename='treelite') RT_PATH = LIBPATH['find_lib_path'](basename='treelite_runtime') if (not", "raise RuntimeError('Please compile the C++ package first') # ignore libraries already in python/treelite;", "PY_RT_DEST) with open('../VERSION', 'r') as f: VERSION = f.readlines()[0].rstrip('\\n') with open('./treelite/VERSION', 'w') as", "'lib')) filelist = os.path.abspath(os.path.join(tempdir, 'runtime', 'FILELIST')) if os.path.exists(libpath): # remove compiled lib shutil.rmtree(libpath)", "url='http://treelite.io', author='DMLC', maintainer='<NAME>', maintainer_email='<EMAIL>', packages=find_packages(), install_requires=['numpy', 'scipy'], package_data={ 'treelite': [LIB_BASENAME, RT_BASENAME, 'treelite_runtime.zip', 'VERSION']", "'runtime'))) libpath = os.path.abspath(os.path.join(tempdir, 'runtime', 'lib')) filelist = 
os.path.abspath(os.path.join(tempdir, 'runtime', 'FILELIST')) if os.path.exists(libpath):", "= LIBPATH['find_lib_path'](basename='treelite_runtime') if (not LIB_PATH) or (not RT_PATH) or (not os.path.isdir('../build/runtime')): raise RuntimeError('Please", "Create a zipped package containing glue code for deployment with TemporaryDirectory() as tempdir:", "LIB_PATH) or (not RT_PATH) or (not os.path.isdir('../build/runtime')): raise RuntimeError('Please compile the C++ package", "RT_BASENAME = os.path.basename(RT_PATH[0]) RT_DEST = os.path.join('./treelite', RT_BASENAME) # remove stale copies of library", "= './treelite/runtime/treelite_runtime' if os.path.exists(PY_RT_DEST): shutil.rmtree(PY_RT_DEST) shutil.copytree(PY_RT_SRC, PY_RT_DEST) with open('../VERSION', 'r') as f: VERSION", "containing glue code for deployment with TemporaryDirectory() as tempdir: shutil.copytree('../runtime/native/', os.path.abspath(os.path.join(tempdir, 'runtime'))) libpath", "'runtime', 'lib')) filelist = os.path.abspath(os.path.join(tempdir, 'runtime', 'FILELIST')) if os.path.exists(libpath): # remove compiled lib", "'r') as f: VERSION = f.readlines()[0].rstrip('\\n') with open('./treelite/VERSION', 'w') as f2: print('{}'.format(VERSION), file=f2)", "deployment with TemporaryDirectory() as tempdir: shutil.copytree('../runtime/native/', os.path.abspath(os.path.join(tempdir, 'runtime'))) libpath = os.path.abspath(os.path.join(tempdir, 'runtime', 'lib'))", "shutil.copytree(PY_RT_SRC, PY_RT_DEST) with open('../VERSION', 'r') as f: VERSION = f.readlines()[0].rstrip('\\n') with open('./treelite/VERSION', 'w')", "only use ones in ../lib if os.path.abspath(os.path.dirname(LIB_PATH[0])) == os.path.abspath('./treelite'): del LIB_PATH[0] del RT_PATH[0]", "or (not os.path.isdir('../build/runtime')): raise RuntimeError('Please compile the C++ package first') # ignore libraries", "os.path.basename(LIB_PATH[0]) LIB_DEST = os.path.join('./treelite', LIB_BASENAME) RT_BASENAME = 
os.path.basename(RT_PATH[0]) RT_DEST = os.path.join('./treelite', RT_BASENAME) #", "LIB_BASENAME) RT_BASENAME = os.path.basename(RT_PATH[0]) RT_DEST = os.path.join('./treelite', RT_BASENAME) # remove stale copies of", "= {'__file__': LIBPATH_PY} # pylint: disable=W0122 exec(compile(open(LIBPATH_PY, \"rb\").read(), LIBPATH_PY, 'exec'), LIBPATH, LIBPATH) #", "(not os.path.isdir('../build/runtime')): raise RuntimeError('Please compile the C++ package first') # ignore libraries already", "f2: print('{}'.format(VERSION), file=f2) # Create a zipped package containing glue code for deployment", "maintainer='<NAME>', maintainer_email='<EMAIL>', packages=find_packages(), install_requires=['numpy', 'scipy'], package_data={ 'treelite': [LIB_BASENAME, RT_BASENAME, 'treelite_runtime.zip', 'VERSION'] }, distclass=BinaryDistribution", "base_dir='runtime/') setup( name='treelite', version=VERSION, description='treelite: toolbox for decision tree deployment', url='http://treelite.io', author='DMLC', maintainer='<NAME>',", "if os.path.exists(LIB_DEST): os.remove(LIB_DEST) if os.path.exists(RT_DEST): os.remove(RT_DEST) shutil.copy(LIB_PATH[0], LIB_DEST) shutil.copy(RT_PATH[0], RT_DEST) # copy treelite.runtime", "disable=R0201 def has_ext_modules(self): \"\"\"Has an extension module\"\"\" return True LIBPATH_PY = os.path.abspath('./treelite/libpath.py') LIBPATH", "pylint: disable=R0201 def has_ext_modules(self): \"\"\"Has an extension module\"\"\" return True LIBPATH_PY = os.path.abspath('./treelite/libpath.py')", "for deployment with TemporaryDirectory() as tempdir: shutil.copytree('../runtime/native/', os.path.abspath(os.path.join(tempdir, 'runtime'))) libpath = os.path.abspath(os.path.join(tempdir, 'runtime',", "disable=W0122 exec(compile(open(LIBPATH_PY, \"rb\").read(), LIBPATH_PY, 'exec'), LIBPATH, LIBPATH) # Paths for C/C++ libraries LIB_PATH", "as f: VERSION = f.readlines()[0].rstrip('\\n') with open('./treelite/VERSION', 'w') as f2: print('{}'.format(VERSION), file=f2) #", 
"platform-specific binaries\"\"\" # pylint: disable=R0201 def has_ext_modules(self): \"\"\"Has an extension module\"\"\" return True", "shutil.rmtree(libpath) if os.path.exists(filelist): os.remove(filelist) shutil.make_archive(base_name='./treelite/treelite_runtime', format='zip', root_dir=os.path.abspath(tempdir), base_dir='runtime/') setup( name='treelite', version=VERSION, description='treelite: toolbox", "first') # ignore libraries already in python/treelite; only use ones in ../lib if", "self.name def __exit__(self, exc_type, exc_value, traceback): shutil.rmtree(self.name) class BinaryDistribution(Distribution): \"\"\"Overrides Distribution class to", "RuntimeError('Please compile the C++ package first') # ignore libraries already in python/treelite; only", "PY_RT_DEST = './treelite/runtime/treelite_runtime' if os.path.exists(PY_RT_DEST): shutil.rmtree(PY_RT_DEST) shutil.copytree(PY_RT_SRC, PY_RT_DEST) with open('../VERSION', 'r') as f:", "os import shutil import tempfile from setuptools import setup, Distribution, find_packages class TemporaryDirectory(object):", "import print_function import os import shutil import tempfile from setuptools import setup, Distribution,", "LIBPATH) # Paths for C/C++ libraries LIB_PATH = LIBPATH['find_lib_path'](basename='treelite') RT_PATH = LIBPATH['find_lib_path'](basename='treelite_runtime') if", "open('../VERSION', 'r') as f: VERSION = f.readlines()[0].rstrip('\\n') with open('./treelite/VERSION', 'w') as f2: print('{}'.format(VERSION),", "# pylint: disable=W0201 return self.name def __exit__(self, exc_type, exc_value, traceback): shutil.rmtree(self.name) class BinaryDistribution(Distribution):", "f.readlines()[0].rstrip('\\n') with open('./treelite/VERSION', 'w') as f2: print('{}'.format(VERSION), file=f2) # Create a zipped package", "as tempdir: shutil.copytree('../runtime/native/', os.path.abspath(os.path.join(tempdir, 'runtime'))) libpath = os.path.abspath(os.path.join(tempdir, 'runtime', 'lib')) filelist = 
os.path.abspath(os.path.join(tempdir,", "__exit__(self, exc_type, exc_value, traceback): shutil.rmtree(self.name) class BinaryDistribution(Distribution): \"\"\"Overrides Distribution class to bundle platform-specific", "LIB_PATH[0] del RT_PATH[0] LIB_BASENAME = os.path.basename(LIB_PATH[0]) LIB_DEST = os.path.join('./treelite', LIB_BASENAME) RT_BASENAME = os.path.basename(RT_PATH[0])", "os.path.join('./treelite', RT_BASENAME) # remove stale copies of library if os.path.exists(LIB_DEST): os.remove(LIB_DEST) if os.path.exists(RT_DEST):", "\"\"\"Setup script\"\"\" from __future__ import print_function import os import shutil import tempfile from", "del RT_PATH[0] LIB_BASENAME = os.path.basename(LIB_PATH[0]) LIB_DEST = os.path.join('./treelite', LIB_BASENAME) RT_BASENAME = os.path.basename(RT_PATH[0]) RT_DEST", "with TemporaryDirectory() as tempdir: shutil.copytree('../runtime/native/', os.path.abspath(os.path.join(tempdir, 'runtime'))) libpath = os.path.abspath(os.path.join(tempdir, 'runtime', 'lib')) filelist", "library if os.path.exists(LIB_DEST): os.remove(LIB_DEST) if os.path.exists(RT_DEST): os.remove(RT_DEST) shutil.copy(LIB_PATH[0], LIB_DEST) shutil.copy(RT_PATH[0], RT_DEST) # copy", "class to bundle platform-specific binaries\"\"\" # pylint: disable=R0201 def has_ext_modules(self): \"\"\"Has an extension" ]
[ "or fn.lower() == pattern.lower()): bad_name = True break if bad_name: continue if os.path.isfile(os.path.join(fn,", "the /config, and add the following: \\r\\n\\r\\n```yaml\\r\\nplayground examples:\\r\\n - docassemble.ALRecipes:data/questions/examples.yml\\r\\n - docassemble.base:data/questions/example-list.yml \\r\\n```\\r\\n\\r\\n',", "[ALDocumentation website - ALRecipes](https://suffolklitlab.org/docassemble-AssemblyLine-documentation/docs/framework/alrecipes) page.\\r\\n\\r\\n## Add examples to your own playground\\r\\n\\r\\nEdit the /config,", "prefix + name + '/', package)) else: bad_name = False for pattern in", "import fnmatchcase from distutils.util import convert_path standard_exclude = ('*.pyc', '*~', '.*', '*.bak', '*.swp*')", "if os.path.isfile(os.path.join(fn, '__init__.py')): if not package: new_package = name else: new_package = package", "os.path.isfile(os.path.join(fn, '__init__.py')): if not package: new_package = name else: new_package = package +", "includes both short examples you can insert directly into\\r\\nyour own playground, and longer", "the [ALDocumentation website - ALRecipes](https://suffolklitlab.org/docassemble-AssemblyLine-documentation/docs/framework/alrecipes) page.\\r\\n\\r\\n## Add examples to your own playground\\r\\n\\r\\nEdit the", "in exclude_directories: if (fnmatchcase(name, pattern) or fn.lower() == pattern.lower()): bad_name = True break", "here.\\r\\n\\r\\n - Some Playground examples for the Document Assembly Line project.\\r\\n - Generic", "examples that you can discover from its landing page: Quinten please add the", "address a particular need.\\r\\n \\r\\nTo learn more, visit the [ALDocumentation website - ALRecipes](https://suffolklitlab.org/docassemble-AssemblyLine-documentation/docs/framework/alrecipes)", "your own playground\\r\\n\\r\\nEdit the /config, and add the following: \\r\\n\\r\\n```yaml\\r\\nplayground examples:\\r\\n - docassemble.ALRecipes:data/questions/examples.yml\\r\\n", 
"exclude_directories=standard_exclude_directories): out = {} stack = [(convert_path(where), '', package)] while stack: where, prefix,", "= {} stack = [(convert_path(where), '', package)] while stack: where, prefix, package =", "'*~', '.*', '*.bak', '*.swp*') standard_exclude_directories = ('.*', 'CVS', '_darcs', './build', './dist', 'EGG-INFO', '*.egg-info')", "False for pattern in exclude_directories: if (fnmatchcase(name, pattern) or fn.lower() == pattern.lower()): bad_name", "and add the following: \\r\\n\\r\\n```yaml\\r\\nplayground examples:\\r\\n - docassemble.ALRecipes:data/questions/examples.yml\\r\\n - docassemble.base:data/questions/example-list.yml \\r\\n```\\r\\n\\r\\n', long_description_content_type='text/markdown', author='AssemblyLine',", "fn = os.path.join(where, name) if os.path.isdir(fn): bad_name = False for pattern in exclude_directories:", "for pattern in exclude_directories: if (fnmatchcase(name, pattern) or fn.lower() == pattern.lower()): bad_name =", "project.\\r\\n - Generic docassemble recipe interviews to address a particular need.\\r\\n \\r\\nTo learn", "Add examples to your own playground\\r\\n\\r\\nEdit the /config, and add the following: \\r\\n\\r\\n```yaml\\r\\nplayground", "Document Assembly Line project.\\r\\n - Generic docassemble recipe interviews to address a particular", "link here.\\r\\n\\r\\n - Some Playground examples for the Document Assembly Line project.\\r\\n -", "== pattern.lower()): bad_name = True break if bad_name: continue out.setdefault(package, []).append(prefix+name) return out", "new_package = name else: new_package = package + '.' 
+ name stack.append((fn, '',", "longer examples that you can discover from its landing page: Quinten please add", "os import sys from setuptools import setup, find_packages from fnmatch import fnmatchcase from", "'EGG-INFO', '*.egg-info') def find_package_data(where='.', package='', exclude=standard_exclude, exclude_directories=standard_exclude_directories): out = {} stack = [(convert_path(where),", "playground\\r\\n\\r\\nEdit the /config, and add the following: \\r\\n\\r\\n```yaml\\r\\nplayground examples:\\r\\n - docassemble.ALRecipes:data/questions/examples.yml\\r\\n - docassemble.base:data/questions/example-list.yml", "setuptools import setup, find_packages from fnmatch import fnmatchcase from distutils.util import convert_path standard_exclude", "and longer examples that you can discover from its landing page: Quinten please", "in os.listdir(where): fn = os.path.join(where, name) if os.path.isdir(fn): bad_name = False for pattern", "name + '/', package)) else: bad_name = False for pattern in exclude: if", "else: bad_name = False for pattern in exclude: if (fnmatchcase(name, pattern) or fn.lower()", "both short examples you can insert directly into\\r\\nyour own playground, and longer examples", "distutils.util import convert_path standard_exclude = ('*.pyc', '*~', '.*', '*.bak', '*.swp*') standard_exclude_directories = ('.*',", "\\r\\n\\r\\n```yaml\\r\\nplayground examples:\\r\\n - docassemble.ALRecipes:data/questions/examples.yml\\r\\n - docassemble.base:data/questions/example-list.yml \\r\\n```\\r\\n\\r\\n', long_description_content_type='text/markdown', author='AssemblyLine', author_email='<EMAIL>', license='The MIT License", "name) if os.path.isdir(fn): bad_name = False for pattern in exclude_directories: if (fnmatchcase(name, pattern)", "- docassemble.base:data/questions/example-list.yml \\r\\n```\\r\\n\\r\\n', long_description_content_type='text/markdown', author='AssemblyLine', author_email='<EMAIL>', license='The MIT License (MIT)', 
url='https://docassemble.org', packages=find_packages(), namespace_packages=['docassemble'],", "need.\\r\\n \\r\\nTo learn more, visit the [ALDocumentation website - ALRecipes](https://suffolklitlab.org/docassemble-AssemblyLine-documentation/docs/framework/alrecipes) page.\\r\\n\\r\\n## Add examples", "description=('A docassemble extension.'), long_description='# docassemble.ALRecipes\\r\\n\\r\\n## Content\\r\\nThis repository includes both short examples you can", "\\r\\nTo learn more, visit the [ALDocumentation website - ALRecipes](https://suffolklitlab.org/docassemble-AssemblyLine-documentation/docs/framework/alrecipes) page.\\r\\n\\r\\n## Add examples to", "'*.bak', '*.swp*') standard_exclude_directories = ('.*', 'CVS', '_darcs', './build', './dist', 'EGG-INFO', '*.egg-info') def find_package_data(where='.',", "fn.lower() == pattern.lower()): bad_name = True break if bad_name: continue out.setdefault(package, []).append(prefix+name) return", "'', package)] while stack: where, prefix, package = stack.pop(0) for name in os.listdir(where):", "Some Playground examples for the Document Assembly Line project.\\r\\n - Generic docassemble recipe", "'/', package)) else: bad_name = False for pattern in exclude: if (fnmatchcase(name, pattern)", "[(convert_path(where), '', package)] while stack: where, prefix, package = stack.pop(0) for name in", "docassemble extension.'), long_description='# docassemble.ALRecipes\\r\\n\\r\\n## Content\\r\\nThis repository includes both short examples you can insert", "website - ALRecipes](https://suffolklitlab.org/docassemble-AssemblyLine-documentation/docs/framework/alrecipes) page.\\r\\n\\r\\n## Add examples to your own playground\\r\\n\\r\\nEdit the /config, and", "package = stack.pop(0) for name in os.listdir(where): fn = os.path.join(where, name) if os.path.isdir(fn):", "name stack.append((fn, '', new_package)) else: stack.append((fn, prefix + name + '/', package)) else:", "= os.path.join(where, name) if os.path.isdir(fn): bad_name = 
False for pattern in exclude_directories: if", "page: Quinten please add the link here.\\r\\n\\r\\n - Some Playground examples for the", "add the following: \\r\\n\\r\\n```yaml\\r\\nplayground examples:\\r\\n - docassemble.ALRecipes:data/questions/examples.yml\\r\\n - docassemble.base:data/questions/example-list.yml \\r\\n```\\r\\n\\r\\n', long_description_content_type='text/markdown', author='AssemblyLine', author_email='<EMAIL>',", "- Generic docassemble recipe interviews to address a particular need.\\r\\n \\r\\nTo learn more,", "name in os.listdir(where): fn = os.path.join(where, name) if os.path.isdir(fn): bad_name = False for", "def find_package_data(where='.', package='', exclude=standard_exclude, exclude_directories=standard_exclude_directories): out = {} stack = [(convert_path(where), '', package)]", "[]).append(prefix+name) return out setup(name='docassemble.ALRecipes', version='0.1.0', description=('A docassemble extension.'), long_description='# docassemble.ALRecipes\\r\\n\\r\\n## Content\\r\\nThis repository includes", "(fnmatchcase(name, pattern) or fn.lower() == pattern.lower()): bad_name = True break if bad_name: continue", "playground, and longer examples that you can discover from its landing page: Quinten", "to your own playground\\r\\n\\r\\nEdit the /config, and add the following: \\r\\n\\r\\n```yaml\\r\\nplayground examples:\\r\\n -", "= False for pattern in exclude: if (fnmatchcase(name, pattern) or fn.lower() == pattern.lower()):", "extension.'), long_description='# docassemble.ALRecipes\\r\\n\\r\\n## Content\\r\\nThis repository includes both short examples you can insert directly", "if bad_name: continue if os.path.isfile(os.path.join(fn, '__init__.py')): if not package: new_package = name else:", "you can discover from its landing page: Quinten please add the link here.\\r\\n\\r\\n", "can discover from its landing page: Quinten please add the link here.\\r\\n\\r\\n -", "{} stack = [(convert_path(where), '', package)] while stack: where, 
prefix, package = stack.pop(0)", "its landing page: Quinten please add the link here.\\r\\n\\r\\n - Some Playground examples", "learn more, visit the [ALDocumentation website - ALRecipes](https://suffolklitlab.org/docassemble-AssemblyLine-documentation/docs/framework/alrecipes) page.\\r\\n\\r\\n## Add examples to your", "package: new_package = name else: new_package = package + '.' + name stack.append((fn,", "a particular need.\\r\\n \\r\\nTo learn more, visit the [ALDocumentation website - ALRecipes](https://suffolklitlab.org/docassemble-AssemblyLine-documentation/docs/framework/alrecipes) page.\\r\\n\\r\\n##", "pattern) or fn.lower() == pattern.lower()): bad_name = True break if bad_name: continue if", "Assembly Line project.\\r\\n - Generic docassemble recipe interviews to address a particular need.\\r\\n", "from fnmatch import fnmatchcase from distutils.util import convert_path standard_exclude = ('*.pyc', '*~', '.*',", "where, prefix, package = stack.pop(0) for name in os.listdir(where): fn = os.path.join(where, name)", "if not package: new_package = name else: new_package = package + '.' +", "that you can discover from its landing page: Quinten please add the link", "\\r\\n```\\r\\n\\r\\n', long_description_content_type='text/markdown', author='AssemblyLine', author_email='<EMAIL>', license='The MIT License (MIT)', url='https://docassemble.org', packages=find_packages(), namespace_packages=['docassemble'], install_requires=['mechanize>=0.4.7'], zip_safe=False,", "os.path.isdir(fn): bad_name = False for pattern in exclude_directories: if (fnmatchcase(name, pattern) or fn.lower()", "= name else: new_package = package + '.' 
+ name stack.append((fn, '', new_package))", "for pattern in exclude: if (fnmatchcase(name, pattern) or fn.lower() == pattern.lower()): bad_name =", "discover from its landing page: Quinten please add the link here.\\r\\n\\r\\n - Some", "'_darcs', './build', './dist', 'EGG-INFO', '*.egg-info') def find_package_data(where='.', package='', exclude=standard_exclude, exclude_directories=standard_exclude_directories): out = {}", "/config, and add the following: \\r\\n\\r\\n```yaml\\r\\nplayground examples:\\r\\n - docassemble.ALRecipes:data/questions/examples.yml\\r\\n - docassemble.base:data/questions/example-list.yml \\r\\n```\\r\\n\\r\\n', long_description_content_type='text/markdown',", "please add the link here.\\r\\n\\r\\n - Some Playground examples for the Document Assembly", "fnmatchcase from distutils.util import convert_path standard_exclude = ('*.pyc', '*~', '.*', '*.bak', '*.swp*') standard_exclude_directories", "('.*', 'CVS', '_darcs', './build', './dist', 'EGG-INFO', '*.egg-info') def find_package_data(where='.', package='', exclude=standard_exclude, exclude_directories=standard_exclude_directories): out", "examples:\\r\\n - docassemble.ALRecipes:data/questions/examples.yml\\r\\n - docassemble.base:data/questions/example-list.yml \\r\\n```\\r\\n\\r\\n', long_description_content_type='text/markdown', author='AssemblyLine', author_email='<EMAIL>', license='The MIT License (MIT)',", "package)) else: bad_name = False for pattern in exclude: if (fnmatchcase(name, pattern) or", "interviews to address a particular need.\\r\\n \\r\\nTo learn more, visit the [ALDocumentation website", "if os.path.isdir(fn): bad_name = False for pattern in exclude_directories: if (fnmatchcase(name, pattern) or", "bad_name: continue if os.path.isfile(os.path.join(fn, '__init__.py')): if not package: new_package = name else: new_package", "= True break if bad_name: continue out.setdefault(package, []).append(prefix+name) return out setup(name='docassemble.ALRecipes', version='0.1.0', 
description=('A", "bad_name: continue out.setdefault(package, []).append(prefix+name) return out setup(name='docassemble.ALRecipes', version='0.1.0', description=('A docassemble extension.'), long_description='# docassemble.ALRecipes\\r\\n\\r\\n##", "short examples you can insert directly into\\r\\nyour own playground, and longer examples that", "recipe interviews to address a particular need.\\r\\n \\r\\nTo learn more, visit the [ALDocumentation", "= True break if bad_name: continue if os.path.isfile(os.path.join(fn, '__init__.py')): if not package: new_package", "author_email='<EMAIL>', license='The MIT License (MIT)', url='https://docassemble.org', packages=find_packages(), namespace_packages=['docassemble'], install_requires=['mechanize>=0.4.7'], zip_safe=False, package_data=find_package_data(where='docassemble/ALRecipes/', package='docassemble.ALRecipes'), )", "Generic docassemble recipe interviews to address a particular need.\\r\\n \\r\\nTo learn more, visit", "= ('.*', 'CVS', '_darcs', './build', './dist', 'EGG-INFO', '*.egg-info') def find_package_data(where='.', package='', exclude=standard_exclude, exclude_directories=standard_exclude_directories):", "if bad_name: continue out.setdefault(package, []).append(prefix+name) return out setup(name='docassemble.ALRecipes', version='0.1.0', description=('A docassemble extension.'), long_description='#", "the Document Assembly Line project.\\r\\n - Generic docassemble recipe interviews to address a", "own playground, and longer examples that you can discover from its landing page:", "stack = [(convert_path(where), '', package)] while stack: where, prefix, package = stack.pop(0) for", "break if bad_name: continue out.setdefault(package, []).append(prefix+name) return out setup(name='docassemble.ALRecipes', version='0.1.0', description=('A docassemble extension.'),", "docassemble.ALRecipes:data/questions/examples.yml\\r\\n - docassemble.base:data/questions/example-list.yml \\r\\n```\\r\\n\\r\\n', 
long_description_content_type='text/markdown', author='AssemblyLine', author_email='<EMAIL>', license='The MIT License (MIT)', url='https://docassemble.org', packages=find_packages(),", "out = {} stack = [(convert_path(where), '', package)] while stack: where, prefix, package", "out setup(name='docassemble.ALRecipes', version='0.1.0', description=('A docassemble extension.'), long_description='# docassemble.ALRecipes\\r\\n\\r\\n## Content\\r\\nThis repository includes both short", "stack.append((fn, prefix + name + '/', package)) else: bad_name = False for pattern", "directly into\\r\\nyour own playground, and longer examples that you can discover from its", "'CVS', '_darcs', './build', './dist', 'EGG-INFO', '*.egg-info') def find_package_data(where='.', package='', exclude=standard_exclude, exclude_directories=standard_exclude_directories): out =", "+ '/', package)) else: bad_name = False for pattern in exclude: if (fnmatchcase(name,", "own playground\\r\\n\\r\\nEdit the /config, and add the following: \\r\\n\\r\\n```yaml\\r\\nplayground examples:\\r\\n - docassemble.ALRecipes:data/questions/examples.yml\\r\\n -", "stack.pop(0) for name in os.listdir(where): fn = os.path.join(where, name) if os.path.isdir(fn): bad_name =", "setup(name='docassemble.ALRecipes', version='0.1.0', description=('A docassemble extension.'), long_description='# docassemble.ALRecipes\\r\\n\\r\\n## Content\\r\\nThis repository includes both short examples", "the following: \\r\\n\\r\\n```yaml\\r\\nplayground examples:\\r\\n - docassemble.ALRecipes:data/questions/examples.yml\\r\\n - docassemble.base:data/questions/example-list.yml \\r\\n```\\r\\n\\r\\n', long_description_content_type='text/markdown', author='AssemblyLine', author_email='<EMAIL>', license='The", "bad_name = False for pattern in exclude_directories: if (fnmatchcase(name, pattern) or fn.lower() ==", "out.setdefault(package, []).append(prefix+name) return out setup(name='docassemble.ALRecipes', version='0.1.0', description=('A 
docassemble extension.'), long_description='# docassemble.ALRecipes\\r\\n\\r\\n## Content\\r\\nThis repository", "else: new_package = package + '.' + name stack.append((fn, '', new_package)) else: stack.append((fn,", "from its landing page: Quinten please add the link here.\\r\\n\\r\\n - Some Playground", "visit the [ALDocumentation website - ALRecipes](https://suffolklitlab.org/docassemble-AssemblyLine-documentation/docs/framework/alrecipes) page.\\r\\n\\r\\n## Add examples to your own playground\\r\\n\\r\\nEdit", "following: \\r\\n\\r\\n```yaml\\r\\nplayground examples:\\r\\n - docassemble.ALRecipes:data/questions/examples.yml\\r\\n - docassemble.base:data/questions/example-list.yml \\r\\n```\\r\\n\\r\\n', long_description_content_type='text/markdown', author='AssemblyLine', author_email='<EMAIL>', license='The MIT", "in exclude: if (fnmatchcase(name, pattern) or fn.lower() == pattern.lower()): bad_name = True break", "find_package_data(where='.', package='', exclude=standard_exclude, exclude_directories=standard_exclude_directories): out = {} stack = [(convert_path(where), '', package)] while", "= [(convert_path(where), '', package)] while stack: where, prefix, package = stack.pop(0) for name", "os.path.join(where, name) if os.path.isdir(fn): bad_name = False for pattern in exclude_directories: if (fnmatchcase(name,", "== pattern.lower()): bad_name = True break if bad_name: continue if os.path.isfile(os.path.join(fn, '__init__.py')): if", "continue if os.path.isfile(os.path.join(fn, '__init__.py')): if not package: new_package = name else: new_package =", "pattern in exclude_directories: if (fnmatchcase(name, pattern) or fn.lower() == pattern.lower()): bad_name = True", "exclude: if (fnmatchcase(name, pattern) or fn.lower() == pattern.lower()): bad_name = True break if", "standard_exclude = ('*.pyc', '*~', '.*', '*.bak', '*.swp*') standard_exclude_directories = ('.*', 'CVS', '_darcs', './build',", "return out setup(name='docassemble.ALRecipes', version='0.1.0', 
description=('A docassemble extension.'), long_description='# docassemble.ALRecipes\\r\\n\\r\\n## Content\\r\\nThis repository includes both", "version='0.1.0', description=('A docassemble extension.'), long_description='# docassemble.ALRecipes\\r\\n\\r\\n## Content\\r\\nThis repository includes both short examples you", "find_packages from fnmatch import fnmatchcase from distutils.util import convert_path standard_exclude = ('*.pyc', '*~',", "to address a particular need.\\r\\n \\r\\nTo learn more, visit the [ALDocumentation website -", "Quinten please add the link here.\\r\\n\\r\\n - Some Playground examples for the Document", "False for pattern in exclude: if (fnmatchcase(name, pattern) or fn.lower() == pattern.lower()): bad_name", "+ name + '/', package)) else: bad_name = False for pattern in exclude:", "you can insert directly into\\r\\nyour own playground, and longer examples that you can", "examples for the Document Assembly Line project.\\r\\n - Generic docassemble recipe interviews to", "'./dist', 'EGG-INFO', '*.egg-info') def find_package_data(where='.', package='', exclude=standard_exclude, exclude_directories=standard_exclude_directories): out = {} stack =", "break if bad_name: continue if os.path.isfile(os.path.join(fn, '__init__.py')): if not package: new_package = name", "'*.swp*') standard_exclude_directories = ('.*', 'CVS', '_darcs', './build', './dist', 'EGG-INFO', '*.egg-info') def find_package_data(where='.', package='',", "continue out.setdefault(package, []).append(prefix+name) return out setup(name='docassemble.ALRecipes', version='0.1.0', description=('A docassemble extension.'), long_description='# docassemble.ALRecipes\\r\\n\\r\\n## Content\\r\\nThis", "examples you can insert directly into\\r\\nyour own playground, and longer examples that you", "examples to your own playground\\r\\n\\r\\nEdit the /config, and add the following: \\r\\n\\r\\n```yaml\\r\\nplayground examples:\\r\\n", "- 
ALRecipes](https://suffolklitlab.org/docassemble-AssemblyLine-documentation/docs/framework/alrecipes) page.\\r\\n\\r\\n## Add examples to your own playground\\r\\n\\r\\nEdit the /config, and add", "the link here.\\r\\n\\r\\n - Some Playground examples for the Document Assembly Line project.\\r\\n", "for the Document Assembly Line project.\\r\\n - Generic docassemble recipe interviews to address", "exclude=standard_exclude, exclude_directories=standard_exclude_directories): out = {} stack = [(convert_path(where), '', package)] while stack: where,", "bad_name = True break if bad_name: continue if os.path.isfile(os.path.join(fn, '__init__.py')): if not package:", "pattern in exclude: if (fnmatchcase(name, pattern) or fn.lower() == pattern.lower()): bad_name = True", "- docassemble.ALRecipes:data/questions/examples.yml\\r\\n - docassemble.base:data/questions/example-list.yml \\r\\n```\\r\\n\\r\\n', long_description_content_type='text/markdown', author='AssemblyLine', author_email='<EMAIL>', license='The MIT License (MIT)', url='https://docassemble.org',", "stack: where, prefix, package = stack.pop(0) for name in os.listdir(where): fn = os.path.join(where,", "'.' + name stack.append((fn, '', new_package)) else: stack.append((fn, prefix + name + '/',", "'./build', './dist', 'EGG-INFO', '*.egg-info') def find_package_data(where='.', package='', exclude=standard_exclude, exclude_directories=standard_exclude_directories): out = {} stack", "repository includes both short examples you can insert directly into\\r\\nyour own playground, and", "= ('*.pyc', '*~', '.*', '*.bak', '*.swp*') standard_exclude_directories = ('.*', 'CVS', '_darcs', './build', './dist',", "add the link here.\\r\\n\\r\\n - Some Playground examples for the Document Assembly Line", "+ '.' 
+ name stack.append((fn, '', new_package)) else: stack.append((fn, prefix + name +", "standard_exclude_directories = ('.*', 'CVS', '_darcs', './build', './dist', 'EGG-INFO', '*.egg-info') def find_package_data(where='.', package='', exclude=standard_exclude,", "sys from setuptools import setup, find_packages from fnmatch import fnmatchcase from distutils.util import", "pattern.lower()): bad_name = True break if bad_name: continue if os.path.isfile(os.path.join(fn, '__init__.py')): if not", "or fn.lower() == pattern.lower()): bad_name = True break if bad_name: continue out.setdefault(package, []).append(prefix+name)", "Line project.\\r\\n - Generic docassemble recipe interviews to address a particular need.\\r\\n \\r\\nTo", "stack.append((fn, '', new_package)) else: stack.append((fn, prefix + name + '/', package)) else: bad_name", "else: stack.append((fn, prefix + name + '/', package)) else: bad_name = False for", "Playground examples for the Document Assembly Line project.\\r\\n - Generic docassemble recipe interviews", "'*.egg-info') def find_package_data(where='.', package='', exclude=standard_exclude, exclude_directories=standard_exclude_directories): out = {} stack = [(convert_path(where), '',", "while stack: where, prefix, package = stack.pop(0) for name in os.listdir(where): fn =", "from setuptools import setup, find_packages from fnmatch import fnmatchcase from distutils.util import convert_path", "not package: new_package = name else: new_package = package + '.' 
+ name", "fnmatch import fnmatchcase from distutils.util import convert_path standard_exclude = ('*.pyc', '*~', '.*', '*.bak',", "True break if bad_name: continue if os.path.isfile(os.path.join(fn, '__init__.py')): if not package: new_package =", "insert directly into\\r\\nyour own playground, and longer examples that you can discover from", "prefix, package = stack.pop(0) for name in os.listdir(where): fn = os.path.join(where, name) if", "long_description='# docassemble.ALRecipes\\r\\n\\r\\n## Content\\r\\nThis repository includes both short examples you can insert directly into\\r\\nyour", "particular need.\\r\\n \\r\\nTo learn more, visit the [ALDocumentation website - ALRecipes](https://suffolklitlab.org/docassemble-AssemblyLine-documentation/docs/framework/alrecipes) page.\\r\\n\\r\\n## Add", "package)] while stack: where, prefix, package = stack.pop(0) for name in os.listdir(where): fn", "convert_path standard_exclude = ('*.pyc', '*~', '.*', '*.bak', '*.swp*') standard_exclude_directories = ('.*', 'CVS', '_darcs',", "for name in os.listdir(where): fn = os.path.join(where, name) if os.path.isdir(fn): bad_name = False", "+ name stack.append((fn, '', new_package)) else: stack.append((fn, prefix + name + '/', package))", "bad_name = False for pattern in exclude: if (fnmatchcase(name, pattern) or fn.lower() ==", "can insert directly into\\r\\nyour own playground, and longer examples that you can discover", "page.\\r\\n\\r\\n## Add examples to your own playground\\r\\n\\r\\nEdit the /config, and add the following:", "new_package = package + '.' 
+ name stack.append((fn, '', new_package)) else: stack.append((fn, prefix", "exclude_directories: if (fnmatchcase(name, pattern) or fn.lower() == pattern.lower()): bad_name = True break if", "bad_name = True break if bad_name: continue out.setdefault(package, []).append(prefix+name) return out setup(name='docassemble.ALRecipes', version='0.1.0',", "import setup, find_packages from fnmatch import fnmatchcase from distutils.util import convert_path standard_exclude =", "= stack.pop(0) for name in os.listdir(where): fn = os.path.join(where, name) if os.path.isdir(fn): bad_name", "package='', exclude=standard_exclude, exclude_directories=standard_exclude_directories): out = {} stack = [(convert_path(where), '', package)] while stack:", "True break if bad_name: continue out.setdefault(package, []).append(prefix+name) return out setup(name='docassemble.ALRecipes', version='0.1.0', description=('A docassemble", "- Some Playground examples for the Document Assembly Line project.\\r\\n - Generic docassemble", "setup, find_packages from fnmatch import fnmatchcase from distutils.util import convert_path standard_exclude = ('*.pyc',", "landing page: Quinten please add the link here.\\r\\n\\r\\n - Some Playground examples for", "('*.pyc', '*~', '.*', '*.bak', '*.swp*') standard_exclude_directories = ('.*', 'CVS', '_darcs', './build', './dist', 'EGG-INFO',", "if (fnmatchcase(name, pattern) or fn.lower() == pattern.lower()): bad_name = True break if bad_name:", "more, visit the [ALDocumentation website - ALRecipes](https://suffolklitlab.org/docassemble-AssemblyLine-documentation/docs/framework/alrecipes) page.\\r\\n\\r\\n## Add examples to your own", "author='AssemblyLine', author_email='<EMAIL>', license='The MIT License (MIT)', url='https://docassemble.org', packages=find_packages(), namespace_packages=['docassemble'], install_requires=['mechanize>=0.4.7'], zip_safe=False, package_data=find_package_data(where='docassemble/ALRecipes/', package='docassemble.ALRecipes'),", 
"long_description_content_type='text/markdown', author='AssemblyLine', author_email='<EMAIL>', license='The MIT License (MIT)', url='https://docassemble.org', packages=find_packages(), namespace_packages=['docassemble'], install_requires=['mechanize>=0.4.7'], zip_safe=False, package_data=find_package_data(where='docassemble/ALRecipes/',", "= False for pattern in exclude_directories: if (fnmatchcase(name, pattern) or fn.lower() == pattern.lower()):", "from distutils.util import convert_path standard_exclude = ('*.pyc', '*~', '.*', '*.bak', '*.swp*') standard_exclude_directories =", "docassemble.ALRecipes\\r\\n\\r\\n## Content\\r\\nThis repository includes both short examples you can insert directly into\\r\\nyour own", "import os import sys from setuptools import setup, find_packages from fnmatch import fnmatchcase", "'.*', '*.bak', '*.swp*') standard_exclude_directories = ('.*', 'CVS', '_darcs', './build', './dist', 'EGG-INFO', '*.egg-info') def", "'__init__.py')): if not package: new_package = name else: new_package = package + '.'", "= package + '.' + name stack.append((fn, '', new_package)) else: stack.append((fn, prefix +", "package + '.' + name stack.append((fn, '', new_package)) else: stack.append((fn, prefix + name", "new_package)) else: stack.append((fn, prefix + name + '/', package)) else: bad_name = False", "os.listdir(where): fn = os.path.join(where, name) if os.path.isdir(fn): bad_name = False for pattern in", "name else: new_package = package + '.' 
+ name stack.append((fn, '', new_package)) else:", "import sys from setuptools import setup, find_packages from fnmatch import fnmatchcase from distutils.util", "'', new_package)) else: stack.append((fn, prefix + name + '/', package)) else: bad_name =", "fn.lower() == pattern.lower()): bad_name = True break if bad_name: continue if os.path.isfile(os.path.join(fn, '__init__.py')):", "pattern) or fn.lower() == pattern.lower()): bad_name = True break if bad_name: continue out.setdefault(package,", "Content\\r\\nThis repository includes both short examples you can insert directly into\\r\\nyour own playground,", "docassemble recipe interviews to address a particular need.\\r\\n \\r\\nTo learn more, visit the", "into\\r\\nyour own playground, and longer examples that you can discover from its landing", "docassemble.base:data/questions/example-list.yml \\r\\n```\\r\\n\\r\\n', long_description_content_type='text/markdown', author='AssemblyLine', author_email='<EMAIL>', license='The MIT License (MIT)', url='https://docassemble.org', packages=find_packages(), namespace_packages=['docassemble'], install_requires=['mechanize>=0.4.7'],", "ALRecipes](https://suffolklitlab.org/docassemble-AssemblyLine-documentation/docs/framework/alrecipes) page.\\r\\n\\r\\n## Add examples to your own playground\\r\\n\\r\\nEdit the /config, and add the", "import convert_path standard_exclude = ('*.pyc', '*~', '.*', '*.bak', '*.swp*') standard_exclude_directories = ('.*', 'CVS',", "pattern.lower()): bad_name = True break if bad_name: continue out.setdefault(package, []).append(prefix+name) return out setup(name='docassemble.ALRecipes'," ]
[ "thumbnail_path = self.get_thumbnail_path() fsr.save_thumbnail(thumbnail, thumbnail_path) def get_thumbnail_path(self): ''' Helper function which returns path", "_ in self.image_result_layers if _.uid == uid] if len(layers) > 0: return layers[0]", "from db, filters for type and returns dictionary of format {\"UID_NAME\": UID} refresh_from_db():", "used (app.api.dependencies.get_db) ''' sess = check_sess(sess) crud.delete_image(self, sess) class IntImage(BaseModel): ''' A class", "= fsr.load_json(self.path_metadata) metadata[\"custom_channel_names\"] = channel_names fsr.save_metadata(metadata, self.path_metadata) def delete_from_system(self, sess=None): ''' calls crud.delete_image", "if \"uid\" == -2. Creates image in database which generates path and id.", "by default. List of all associated IntImageResultLayer objects. result_measurements : List[IntResultMeasurement] empty list", "db_image = self.to_db_class() db_image.create_in_db() self.uid = db_image.uid original_filename = self.metadata[\"original_filename\"] self.metadata = self.metadata[\"images\"][self.series_index]", "self.uid = sql_image.id self.path_image = Path(sql_image.path_image) self.path_metadata = Path(sql_image.path_metadata) def refresh_from_db(self, sess=None): '''", "are reloaded from db. ''' db_image = self.to_db_class() db_image.set_bg_false() self.refresh_from_db() def set_bg_true(self, image_layer:", "List[int], suffix: str = None): ''' Method to estimate ground truth from multiple", "None: Returns layer with corresponding id, returns None if id is not found", "file storage. Uses default session if none is passed. 
''' uid: int series_index:", "from app.api.classes_com import ComImage from app.api.dependencies import check_sess from pydantic import BaseModel, constr", "selection.sum()/n_pixel mean_pixel.append(_mean) return mean_pixel def measure_mask_in_image(self, layer_id: int): ''' Method to measure mask", "1: dims = np.array([y, x]) dims = dims/dims.max() else: dims = None print(\"Couldn't", "fsr.load_zarr(kwargs[\"path_image\"]) kwargs[\"data\"] = data else: kwargs[\"data\"] = None metadata = fsr.load_json(self.path_metadata) metadata_omexml_path =", "returns None if id is not found in self.image_result_layers calculate_background() -> List: Returns", "= fsr.load_metadata_xml(metadata_omexml_path) del kwargs[\"path_metadata\"] del kwargs[\"path_image\"] kwargs[\"metadata\"] = metadata kwargs[\"image_result_layers\"] = [image_result_layer.to_int_class() for", "classifiers from db, filters for type and returns dictionary of format {\"UID_NAME\": UID}", "when image was imported has_bg_layer : bool False by default. Indicates if image", "= updated_info.image_result_layers self.result_measurements = updated_info.result_measurements self.tags = updated_info.tags self.has_bg_layer = updated_info.has_bg_layer self.bg_layer_id =", "it to db and file storage. get_classifiers(clf_type: str) -> dict: Fetches all saved", "Methods ------- to_int_class()->app.api.classes_internal.IntImage: returns object as int_class. Loads layer array from file path", "updates the objects attributes. delete_result_layer(layer_id: int): Deletes the layer from database, file storage", "original_filename # save zarr fsr.save_zarr(self.data, db_image.path_image) # save metadata dict fsr.save_json(self.metadata, db_image.path_metadata) #", "clf_dict[\"No classifers found\"] = None return clf_dict def refresh_from_db(self): ''' Requests current information", "str = \"\" empty string by default. 
brief description of the object has_bg_layer:", "-> app.api.classes_internal.IntResultLayer | None: Returns layer with corresponding id, returns None if id", "in the frontend data : Any ___TO BE DONE: add custom field type___", "suffix: str): Fetches given layers and uses SimpleITKs STAPLE algorithm to estimate ground", "int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) else: warnings.warn( f\"Mask shape {mask.shape} does not match image shape", "select_channel(self, channel: int): ''' Helper method expects channel index. Returns deep copy of", "- layer_id(int): Id of result layer to be deleted. ''' layer = self.select_result_layer(layer_id)", "self.image_result_layers if _.uid == uid] if len(layers) > 0: return layers[0] else: warnings.warn(", "objects path and uid attributes accordingly. Uses default session if none is passed.", "are generated and updated in object. Parameters: - sess(sqlalchemy.orm.Session): The database session to", "if layer_id == self.bg_layer_id: self.set_bg_false() layer.delete() self.refresh_from_db() def estimate_ground_truth_layer(self, layer_id_list: List[int], suffix: str", "no session is passed default session will be used (app.api.dependencies.get_db). ''' sess =", "list by default. List of all associated IntResultMeasurement objects tags : Set[str]: empty", "= [] result_measurements: List[IntResultMeasurement] = [] tags: Set[str] = set() data: Any metadata_omexml:", "as layer selected as background_layer. bg_layer_id : int, optional None if no bg_layer", "work, some may cause trouble. 
metadata_omexml : Any original metadata xml data as", "name import warnings import xml from pathlib import Path from typing import Any,", "data : Any ___TO BE DONE: add custom field type___ array of shape", "List[DbResultMeasurement] = [] tags: Set[str] = [] def to_int_class(self, for_refresh=False): ''' Returns object", "sql_image.id self.path_image = Path(sql_image.path_image) self.path_metadata = Path(sql_image.path_metadata) def refresh_from_db(self, sess=None): ''' Refreshes object", "background. Returns list of length n_channel with mean intensity of measured pixels. '''", "image's metadata will be saved to .json and loaded. hint : str, optional", "has an associated background layer. bg_layer_id: int, optional None if no associated background", "session if none is passed. ''' uid: int series_index: int name: str hint:", "int_result_layer = IntImageResultLayer( uid=-1, name=f\"{path.name}\", hint=\"imported mask\", image_id=self.uid, layer_type=\"labels\", data=_mask ) int_result_layer.on_init() self.refresh_from_db()", "to be background. Returns list of length n_channel with mean intensity of measured", "not found in self.image_result_layers calculate_background() -> List: Returns list of length n_channels. List", "images array \".zarr\" folder. Automatically generated as image is saved to database. image_result_layers:", "image_layer(app.api.classes_int.IntImageImageResultLayer): Layer to be selected as background layer. ''' layer_uid = image_layer.uid db_image", "= np.where(bg_mask, channel_data, 0) _mean = selection.sum()/n_pixel mean_pixel.append(_mean) return mean_pixel def measure_mask_in_image(self, layer_id:", "delete_from_system(self, sess=None): ''' calls crud.delete_image and passed db_image object to delete all associated", "if _.uid == uid] if len(layers) > 0: return layers[0] else: warnings.warn( f\"IntImage.select", "Any ___TO BE DONE: add custom field type___ array of shape (z,c,y,x) in", "class representation to db class representation. 
''' db_image_result_layers = [result_layer.to_db_class() for result_layer in", "belong to a result layer of this image. Result layer will be turned", "(app.api.dependencies.get_db) ''' sess = check_sess(sess) sql_image = crud.create_image(self, sess) self.uid = sql_image.id self.path_image", "self.has_bg_layer: bg_uid = self.bg_layer_id bg_layer = self.select_result_layer(bg_uid) bg_mask = bg_layer.data else: bg_mask =", "truth estimation layer will be binarized, all labels > 0 will be unified", "return clf_dict def refresh_from_db(self): ''' Requests current information from db and updates the", "for calculation. Saves label layer to image, database and file storage. Parameters: -", "\"\" empty string by default. brief description of the object has_bg_layer: bool =", "db_image.refresh_from_db() self.name = updated_info.name self.hint = updated_info.hint self.experiment_ids = updated_info.experiment_ids self.image_result_layers = updated_info.image_result_layers", "bg_layer_id property to given value. Parameters: - layer_uid(int): uid of result layer to", "to a result layer of this image. Result layer will be turned to", "1: dims = np.array([z, y, x]) dims = dims/dims.max() elif n_z == 1:", "image_id=self.uid, layer_type=\"labels\", data=mask ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) def add_layer_from_mask(self, path: Path): mask =", "data of this image from db and updates the objects attributes. delete_result_layer(layer_id: int):", "database. Uses default session if none is passed. update_channel_names(channel_names: List[str]) edits \"custom_channel_names\" attribute", "files, most numpy operations work, some may cause trouble. metadata_omexml : Any original", "to be selected as background layer. 
''' layer_uid = image_layer.uid db_image = self.to_db_class()", "set_bg_false(self): ''' Helper function to set has_bg_layer to False and bg_layer_id to None.", "int_result_layer = IntImageResultLayer( uid=-1, name=f\"{path.name}\", hint=\"imported mask\", image_id=self.uid, layer_type=\"labels\", data=mask ) int_result_layer.on_init() self.refresh_from_db()", "suffix = \"_\" + suffix label_array_list = [crud.read_result_layer_by_uid( layer_id).to_int_class().data for layer_id in layer_id_list]", "crud.read_classifier_dict_by_type(clf_type) if clf_dict == {}: clf_dict[\"No classifers found\"] = None return clf_dict def", "and saves it to db and file storage. get_classifiers(clf_type: str) -> dict: Fetches", "layer is selected as background layer, the attributes \"has_bg_layer\" and \"bg_layer_id\" are set", "None if no scaling information was provided in metadata. ''' x = self.metadata['pixel_size_physical_x']", "The database session to be used, if no session is passed default session", "shape: (n_labels, n_channel, n_features), n_features == 2 (n_pixels, sum_pixels) Parameters: - layer_id(int): uid", "classifier type, for available types see app.api.cfg_classes.classifier_types. ''' # Fetches dict in form", "be deleted. ''' layer = self.select_result_layer(layer_id) if layer_id == self.bg_layer_id: self.set_bg_false() layer.delete() self.refresh_from_db()", "fsr.save_thumbnail(thumbnail, thumbnail_path) def get_thumbnail_path(self): ''' Helper function which returns path to the thumbnail", "def select_result_layer(self, uid: int): layers = [_ for _ in self.image_result_layers if _.uid", "db and updates the objects attributes. delete_result_layer(layer_id: int): Deletes the layer from database,", "- suffix(str): will be appended to layer name. ''' if suffix == None:", "None return clf_dict def refresh_from_db(self): ''' Requests current information from db and updates", "in database. Uses default session if none is passed. 
update_channel_names(channel_names: List[str]) edits \"custom_channel_names\"", "from app.api.classes.image_result_layer import (DbImageResultLayer, IntImageResultLayer) from app.api.classes.result_measurement import (DbResultMeasurement, IntResultMeasurement) from app.api.classes_com import", "default session will be used (app.api.dependencies.get_db) ''' sess = check_sess(sess) crud.update_image_hint(self.uid, new_hint, sess)", "entries Parameters: - sess(sqlalchemy.orm.Session): The database session to be used, if no session", "self.get_thumbnail_path() fsr.save_thumbnail(thumbnail, thumbnail_path) def get_thumbnail_path(self): ''' Helper function which returns path to the", "DbImage object. set_bg_false(): Helper function to set has_bg_layer to False and bg_layer_id to", "of all associated DbImageResultLayer objects measurements: List[DbResultMeasurement] = [] emtpy list by default.", "and file storage. Parameters: - layer_id_list(List[int]): List of layer ids to be used", "default. brief description of the object experiment_ids: List[int] empty list by default. List", "[] result_measurements: List[IntResultMeasurement] = [] tags: Set[str] = set() data: Any metadata_omexml: Any", "values for each channel if background layer is defined and zeros if no", "== False: data = fsr.load_zarr(kwargs[\"path_image\"]) kwargs[\"data\"] = data else: kwargs[\"data\"] = None metadata", "as DbImage object. set_bg_false(): Helper function to set has_bg_layer to False and bg_layer_id", "in layer_id_list] ground_truth_estimation_array = utils_results.staple_gte( label_array_list) hint = f\"Following Label Layers were used", "-2. Creates image in database which generates path and id. ''' if self.uid", "will be used (app.api.dependencies.get_db) ''' sess = check_sess(sess) updated_db_image = crud.read_image_by_uid( self.uid, sess,", "be used (app.api.dependencies.get_db). 
''' sess = check_sess(sess) crud.update_image_bg_true(self.uid, layer_uid, sess) def create_in_db(self, sess=None):", "object as com_class. set_bg_false(sess = None) sets \"has_bg_layer\" property to False in db.", ": Any original metadata xml data as read by bioformats import when image", "new_hint(str): string to be saved. - sess(sqlalchemy.orm.Session): The database session to be used,", "none is passed. refresh_from_db() -> DbImage Fetches image from database and returns DbImage", "= self.dict() kwargs[\"metadata\"] = utils_import.load_metadata_only( self.path_metadata) kwargs[\"imageResultLayers\"] = [image_result_layer.to_com_class() for image_result_layer in self.image_result_layers]", "- new_hint(str): string to be saved. - sess(sqlalchemy.orm.Session): The database session to be", "= \"_\" + suffix label_array_list = [crud.read_result_layer_by_uid( layer_id).to_int_class().data for layer_id in layer_id_list] ground_truth_estimation_array", "generates path and id. ''' if self.uid == -1: db_image = self.to_db_class() db_image.create_in_db()", "here metadata : dict reduced metadata for easy use within mistos. As created", "- clf_type(str): Valid classifier type, for available types see app.api.cfg_classes.classifier_types. ''' # Fetches", "mask.shape == (1, image_shape[-2], image_shape[-1]): _mask = np.zeros((image_shape[0], image_shape[-2], image_shape[-1])) _mask[:,...] = mask", "self.to_db_class() db_image.create_in_db() self.uid = db_image.uid print(f\"Importing archived Mistos image with id {self.uid}\") fsr.save_zarr(self.data,", "scaling information was provided in metadata. ''' x = self.metadata['pixel_size_physical_x'] y = self.metadata['pixel_size_physical_y']", "suffix(str): will be appended to layer name. ''' if suffix == None: suffix", "n_z == 1: dims = np.array([y, x]) dims = dims/dims.max() else: dims =", "sess=None): ''' Creates object in db. 
Paths and id are generated and updated", ": int the objects unique identifier series_index : int index of image if", "name : str the objects name hint : str = \"\" empty string", "sess=None): ''' This function expects a new hint as string and calls crud.update_image_hint", "for given result layer and saves it to db and file storage. get_classifiers(clf_type:", "layer. Parameters: - image_layer(app.api.classes_int.IntImageImageResultLayer): Layer to be selected as background layer. ''' layer_uid", "has_bg_layer=self.has_bg_layer, bg_layer_id=self.bg_layer_id, experiment_ids=self.experiment_ids, image_result_layers=db_image_result_layers, result_measurements=db_result_measurements, tags=self.tags ) return db_image def set_bg_false(self): ''' Helper", "self.data.shape[2], self.data.shape[3] )) if bg_mask.max() < 2: bg_mask = np.where(bg_mask > 0, 1,", "image_result_layers: List[DbImageResultLayer] = [] measurements: List[DbResultMeasurement] = [] tags: Set[str] = [] def", "STAPLE probabilities. For ground truth estimation layer will be binarized, all labels >", "check_sess(sess) sql_image = crud.create_image(self, sess) self.uid = sql_image.id self.path_image = Path(sql_image.path_image) self.path_metadata =", "uid attributes accordingly. Uses default session if none is passed. 
refresh_from_db() -> DbImage", "self.to_db_class() db_image.set_bg_false() self.refresh_from_db() def set_bg_true(self, image_layer: IntImageResultLayer): ''' Method to set layer as", "layer will be binarized, all labels > 0 will be unified and represented", "dims = dims/dims.max() else: dims = None print(\"Couldn't calculate scaling from metadata, defaulting", "result_measurements: List[IntResultMeasurement] = [] tags: Set[str] = set() data: Any metadata_omexml: Any has_bg_layer:", "Returns path as pathlib.Path ''' return utils_paths.fileserver.joinpath(utils_paths.make_thumbnail_path(self.uid)) def get_image_scaling(self): ''' Reads pixel dimensions", "update_hint(self, new_hint: str, sess=None): ''' This function expects a new hint as string", "background layer. Parameters: - image_layer(app.api.classes_int.IntImageImageResultLayer): Layer to be selected as background layer. '''", "= None): creates object in database, updates objects path and uid attributes accordingly.", "default. brief description of the object has_bg_layer: bool = False indicator if image", "to None\") return dims def to_db_class(self): ''' Transforms internal class representation to db", "Helper function which returns path to the thumbnail on fileserver. get_image_scaling(): Returns dimensions", "= False bg_layer_id: Optional[int] path_metadata: Optional[Path] path_image: Optional[Path] image_result_layers: List[DbImageResultLayer] = [] measurements:", "def set_bg_false(self, sess=None): ''' Sets imagaes has_bg_layer property to False in database. Parameters:", "by default. List of all associated DbImageResultLayer objects measurements: List[DbResultMeasurement] = [] emtpy", "saved to database. image_result_layers: List[DbImageResultLayer] = [] emtpy list by default. List of", "(creates metadata dict for whole series) Series metadatadict is passed into IntImage.on_init(). 
Thereafter,", "in array with shape (z,y,x) or None if no scaling information was provided", "List of all associated IntResultMeasurement objects tags : Set[str]: empty set by default.", "db_image = self.to_db_class() db_image.create_in_db() self.uid = db_image.uid print(f\"Importing archived Mistos image with id", "def measure_mask_in_image(self, layer_id: int): ''' Method to measure mask and save result as", "layer.name), hint=\"\", image_id=self.uid, result_layer_id=layer.uid, measurement=measurement, measurement_summary=measurement_summary ) measurement_result.on_init() self.refresh_from_db() return measurement_result def get_classifiers(self,", "Optional[int] path_metadata: Optional[Path] path_image: Optional[Path] image_result_layers: List[DbImageResultLayer] = [] measurements: List[DbResultMeasurement] = []", "in database and file storage get_thumbnail_path(): Helper function which returns path to the", "Images Attributes ---------- uid : int the objects unique identifier series_index : int", "passed into IntImage.on_init(). 
Thereafter, only image's metadata will be saved to .json and", "saved in database and file storage get_thumbnail_path(): Helper function which returns path to", "db_image.create_in_db() self.uid = db_image.uid original_filename = self.metadata[\"original_filename\"] self.metadata = self.metadata[\"images\"][self.series_index] self.metadata[\"original_filename\"] = original_filename", "sess, for_refresh=True) return updated_db_image def update_hint(self, new_hint: str, sess=None): ''' This function expects", "for n in range(n_channel): channel_data = self.select_channel(n) selection = np.where(bg_mask, channel_data, 0) _mean", "already loaded if for_refresh == False: data = fsr.load_zarr(kwargs[\"path_image\"]) kwargs[\"data\"] = data else:", "db_image.set_bg_false() self.refresh_from_db() def set_bg_true(self, image_layer: IntImageResultLayer): ''' Method to set layer as background", "updated_info.hint self.experiment_ids = updated_info.experiment_ids self.image_result_layers = updated_info.image_result_layers self.result_measurements = updated_info.result_measurements self.tags = updated_info.tags", "List of all associated DbResultMeasurement objects tags: Set[str] = [] set of string", "db entries Parameters: - sess(sqlalchemy.orm.Session): The database session to be used, if no", "used (app.api.dependencies.get_db). ''' sess = check_sess(sess) crud.update_image_bg_false(self.uid, sess) def set_bg_true(self, layer_uid: int, sess=None):", "# save metadata xml path_xml = utils_paths.make_metadata_xml_path_from_json_path( db_image.path_metadata) metadata_string = self.metadata_omexml.to_xml(encoding=\"utf-8\") metadata_string =", "data=mask ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) def add_layer_from_mask(self, path: Path): mask = utils_import.read_mask(path) if", "and \"bg_layer_id\" are set to False and None. Parameters: - layer_id(int): Id of", "selected as background layer. 
''' layer_uid = image_layer.uid db_image = self.to_db_class() db_image.set_bg_true(layer_uid) self.refresh_from_db()", "length n_channel with mean intensity of measured pixels. ''' if self.has_bg_layer: bg_uid =", "= \"\" experiment_ids: List[int] = [] image_result_layers: List[IntImageResultLayer] = [] result_measurements: List[IntResultMeasurement] =", "to None. set_bg_true(image_layer: app.api.classes_int.IntImageResultLayer): Method to set layer as background layer select_channel(channel: int)", "to the images metadata \".json\". Automatically generated as image is saved to database.", "id, returns None if id is not found in self.image_result_layers calculate_background() -> List:", "dict hint: Optional[str] = \"\" experiment_ids: List[int] = [] image_result_layers: List[IntImageResultLayer] = []", "for measurement in self.measurements] kwargs[\"seriesIndex\"] = self.series_index kwargs[\"hasBgLayer\"] = self.has_bg_layer kwargs[\"bgLayerId\"] = self.bg_layer_id", "{name: id} clf_dict = crud.read_classifier_dict_by_type(clf_type) if clf_dict == {}: clf_dict[\"No classifers found\"] =", "app.api.classes_internal.IntResultLayer | None: Returns layer with corresponding id, returns None if id is", "import (DbResultMeasurement, IntResultMeasurement) from app.api.classes_com import ComImage from app.api.dependencies import check_sess from pydantic", "-1 and as imported Mistos image if \"uid\" == -2. Creates image in", "bool = False bg_layer_id: Optional[int] def on_init(self): ''' Method to initialize the object.", "path to the images array \".zarr\" folder. Automatically generated as image is saved", "reduced metadata for easy use within mistos. 
As created by app.api.utils_import.acquire_metadata_dict (creates metadata", "thumbnail thumbnail = utils_import.generate_thumbnail(self.data) thumbnail_path = self.get_thumbnail_path() fsr.save_thumbnail(thumbnail, thumbnail_path) def get_thumbnail_path(self): ''' Helper", "self.uid, sess, for_refresh=True) return updated_db_image def update_hint(self, new_hint: str, sess=None): ''' This function", "single file name : str the objects name hint : str = \"\"", "brief description of the object has_bg_layer: bool = False indicator if image has", "measured pixels. ''' if self.has_bg_layer: bg_uid = self.bg_layer_id bg_layer = self.select_result_layer(bg_uid) bg_mask =", "current information from db and updates the object's attributes accordingly. Does not reload", "array with shape (z,y,x) or None if no scaling information was provided in", "be used for ground truth estimation. Must belong to this image. - suffix(str):", "metadata : dict reduced metadata for easy use within mistos. As created by", "for available types see app.api.cfg_classes.classifier_types. ''' # Fetches dict in form {name: id}", "the objects attributes. delete_result_layer(layer_id: int): Deletes the layer from database, file storage and", ": Any ___TO BE DONE: add custom field type___ array of shape (z,c,y,x)", "DONE: add custom field type___ array of shape (z,c,y,x) in which the image", "app.api.classes.result_measurement import (DbResultMeasurement, IntResultMeasurement) from app.api.classes_com import ComImage from app.api.dependencies import check_sess from", "default session will be used (app.api.dependencies.get_db) ''' sess = check_sess(sess) crud.delete_image(self, sess) class", "tags: Set[str] = [] def to_int_class(self, for_refresh=False): ''' Returns object as int class.", "used, if no session is passed default session will be used (app.api.dependencies.get_db). 
'''", "layers \\n{layers}\", UserWarning) return None def calculate_background(self): ''' Expects the bg_uid to belong", "of all associated DbResultMeasurement objects tags: Set[str] = [] set of string keywords", "''' uid: int series_index: int name: str hint: str = \"\" has_bg_layer: bool", "as np from app import crud from app import fileserver_requests as fsr from", "empty list by default. List of all associated IntImageResultLayer objects. result_measurements : List[IntResultMeasurement]", "== -1: db_image = self.to_db_class() db_image.create_in_db() self.uid = db_image.uid original_filename = self.metadata[\"original_filename\"] self.metadata", "= self.get_thumbnail_path() fsr.save_thumbnail(thumbnail, thumbnail_path) def get_thumbnail_path(self): ''' Helper function which returns path to", "layer will be initialized as IntResultLayer. add_layer_from_roi(path) add_layer_from_mask(path) ''' uid: int name: str", "for type and returns dictionary of format {\"UID_NAME\": UID} refresh_from_db(): Fetches data of", "(app.api.dependencies.get_db) ''' sess = check_sess(sess) updated_db_image = crud.read_image_by_uid( self.uid, sess, for_refresh=True) return updated_db_image", "self.select_result_layer(layer_id) measurement, measurement_summary = utils_results.calculate_measurement( image_array, layer.data) measurement_result = IntResultMeasurement( uid=-1, name=utils_paths.make_measurement_name(self.name, layer.name),", "db and file storage. get_classifiers(clf_type: str) -> dict: Fetches all saved classifiers from", "metadata xml path_xml = utils_paths.make_metadata_xml_path_from_json_path( db_image.path_metadata) metadata_string = self.metadata_omexml.to_xml(encoding=\"utf-8\") metadata_string = xml.dom.minidom.parseString( metadata_string).toprettyxml(indent=\"\\t\")", "result_measurements : List[IntResultMeasurement] empty list by default. 
List of all associated IntResultMeasurement objects", "select_result_layer(uid: int) -> app.api.classes_internal.IntResultLayer | None: Returns layer with corresponding id, returns None", "function to set has_bg_layer to False and bg_layer_id to None. Attribute is changed", "object. update_hint(new_hint: str, sess = None): updates objects hint in database. Uses default", "types see app.api.cfg_classes.classifier_types. ''' # Fetches dict in form {name: id} clf_dict =", "Set[str] = [] def to_int_class(self, for_refresh=False): ''' Returns object as int class. Parameters:", "in database. Parameters: - sess(sqlalchemy.orm.Session): The database session to be used, if no", "''' Sets imagaes has_bg_layer property to False in database. Parameters: - sess(sqlalchemy.orm.Session): The", "sess=None): ''' Refreshes object image from db. Parameters: - sess(sqlalchemy.orm.Session): The database session", "has_bg_layer: bool = False bg_layer_id: Optional[int] def on_init(self): ''' Method to initialize the", "return channel_data def select_result_layer(self, uid: int): layers = [_ for _ in self.image_result_layers", "calculate_background() -> List: Returns list of length n_channels. List holds the mean pixel", "# Only load the full image if not already loaded if for_refresh ==", "List holds the mean pixel values for each channel if background layer is", "self.metadata[\"images\"][self.series_index] self.metadata[\"original_filename\"] = original_filename # save zarr fsr.save_zarr(self.data, db_image.path_image) # save metadata dict", "self.refresh_from_db() return measurement_result def get_classifiers(self, clf_type: str): ''' Loads all classifiers of given", "Label Layers were used to estimate the ground truth: {layer_id_list}\" int_result_layer = IntImageResultLayer(", "int class. 
Parameters: - for_refresh(bool = False): If True, image array is not", ": int the objects unique identifier name : str the objects name series_index", "image data again ''' db_image = self.to_db_class() updated_info = db_image.refresh_from_db() self.name = updated_info.name", "of strings. opens metadata.json and edits custom_channel_names Parameters: - channel_names(List[str]): List of strings", "index of image if multiple images were imported in a single file name", "''' Creates object in db. Paths and id are generated and updated in", "metadata = fsr.load_json(self.path_metadata) metadata[\"custom_channel_names\"] = channel_names fsr.save_metadata(metadata, self.path_metadata) def delete_from_system(self, sess=None): ''' calls", "mask int_result_layer = IntImageResultLayer( uid=-1, name=f\"{path.name}\", hint=\"imported mask\", image_id=self.uid, layer_type=\"labels\", data=_mask ) int_result_layer.on_init()", "get_image_scaling(self): ''' Reads pixel dimensions and returns relative dimensions. Returns dimensions normalized scales", "type from database. Returns dictionary of format {\"UID_NAME\": UID}. Mainly for use in", "import (DbImageResultLayer, IntImageResultLayer) from app.api.classes.result_measurement import (DbResultMeasurement, IntResultMeasurement) from app.api.classes_com import ComImage from", "''' This function expects a new channel names as list of strings. opens", "read by bioformats import when image was imported has_bg_layer : bool False by", "metadata xml data as read by bioformats import when image was imported has_bg_layer", "storage) Returns IntResultMeasurement object: measurement.measurement has shape: (n_labels, n_channel, n_features), n_features == 2", "is stored here metadata : dict reduced metadata for easy use within mistos.", "of the background layer. path_metadata: pathlib.Path, optional path to the images metadata \".json\".", "in database, updates objects path and uid attributes accordingly. 
Uses default session if", "Returns list of length n_channels. List holds the mean pixel values for each", "will be turned to binary, assuming all labels > 0 to be background.", "app.api.classes_int.IntImageResultLayer): Method to set layer as background layer select_channel(channel: int) -> np.array: Helper", "(==1) for calculation. Saves label layer to image, database and file storage. Parameters:", "is passed default session will be used (app.api.dependencies.get_db) ''' sess = check_sess(sess) crud.update_image_hint(self.uid,", "from app import crud from app import fileserver_requests as fsr from app.api import", "Returns deep copy of channel with shape (z,y,x). Parameters: - channel(int): index of", "List[DbImageResultLayer] = [] emtpy list by default. List of all associated DbImageResultLayer objects", "image is saved to database. image_result_layers: List[DbImageResultLayer] = [] emtpy list by default.", "self.image_result_layers] db_result_measurements = [measurement.to_db_class() for measurement in self.result_measurements] db_image = DbImage( uid=self.uid, series_index=self.series_index,", "property to False in db. set_bg_true(layer_uid: int, sess = None) sets \"has_bg_layer\" property", "''' layer_uid = image_layer.uid db_image = self.to_db_class() db_image.set_bg_true(layer_uid) self.refresh_from_db() def select_channel(self, channel: int):", "of the object has_bg_layer: bool = False indicator if image has an associated", "Initializes object. Object is saved in database and file storage get_thumbnail_path(): Helper function", "= [] emtpy list by default. 
List of all associated DbImageResultLayer objects measurements:", "kwargs[\"result_measurements\"] = [measurement.to_int_class() for measurement in self.measurements] return IntImage(**kwargs) def to_com_class(self): ''' Returns", "self.path_metadata) kwargs[\"metadata_omexml\"] = fsr.load_metadata_xml(metadata_omexml_path) del kwargs[\"path_metadata\"] del kwargs[\"path_image\"] kwargs[\"metadata\"] = metadata kwargs[\"image_result_layers\"] =", "is defined. measure_mask_in_image(layer_id: int) -> app.api.classes_int.IntMeasurementResult: Returns measurement object for given result layer", "dims/dims.max() elif n_z == 1: dims = np.array([y, x]) dims = dims/dims.max() else:", "will be used (app.api.dependencies.get_db) ''' sess = check_sess(sess) crud.delete_image(self, sess) class IntImage(BaseModel): '''", "image, database and file storage. Parameters: - layer_id_list(List[int]): List of layer ids to", "self.image_result_layers = updated_info.image_result_layers self.result_measurements = updated_info.result_measurements self.tags = updated_info.tags self.has_bg_layer = updated_info.has_bg_layer self.bg_layer_id", "elif mask.shape == (1, image_shape[-2], image_shape[-1]): _mask = np.zeros((image_shape[0], image_shape[-2], image_shape[-1])) _mask[:,...] =", "layers[0] else: warnings.warn( f\"IntImage.select channel could not select layer with id {uid}.\\nThis image", "for _ in self.image_result_layers if _.uid == uid] if len(layers) > 0: return", "as background layer. ''' layer_uid = image_layer.uid db_image = self.to_db_class() db_image.set_bg_true(layer_uid) self.refresh_from_db() def", "type___ array of shape (z,c,y,x) in which the image is stored. 
Is loaded", "= utils_results.calculate_measurement( image_array, layer.data) measurement_result = IntResultMeasurement( uid=-1, name=utils_paths.make_measurement_name(self.name, layer.name), hint=\"\", image_id=self.uid, result_layer_id=layer.uid,", "check_sess(sess) crud.delete_image(self, sess) class IntImage(BaseModel): ''' A class to handle calculations and other", "changed in db, then object attributes are reloaded from db. ''' db_image =", "self.experiment_ids = updated_info.experiment_ids self.image_result_layers = updated_info.image_result_layers self.result_measurements = updated_info.result_measurements self.tags = updated_info.tags self.has_bg_layer", "no bg_layer selected, otherwise it holds the bg_layer_id Methods ------- on_init(): Initializes object.", "if no session is passed default session will be used (app.api.dependencies.get_db) ''' sess", "(DbResultMeasurement, IntResultMeasurement) from app.api.classes_com import ComImage from app.api.dependencies import check_sess from pydantic import", "= False indicator if image has an associated background layer. bg_layer_id: int, optional", "with id {self.uid}\") fsr.save_zarr(self.data, db_image.path_image) fsr.save_json(self.metadata, db_image.path_metadata) path_xml = utils_paths.make_metadata_xml_path_from_json_path( db_image.path_metadata) # metadata_string", "False): If True, image array is not reloaded from file storage. ''' kwargs", "= [] for n in range(n_channel): channel_data = self.select_channel(n) selection = np.where(bg_mask, channel_data,", "be used (app.api.dependencies.get_db) ''' sess = check_sess(sess) crud.delete_image(self, sess) class IntImage(BaseModel): ''' A", "defined and zeros if no background layer is defined. measure_mask_in_image(layer_id: int) -> app.api.classes_int.IntMeasurementResult:", "optional path to the images metadata \".json\". 
Automatically generated as image is saved", "db_image = self.to_db_class() updated_info = db_image.refresh_from_db() self.name = updated_info.name self.hint = updated_info.hint self.experiment_ids", "passed. update_channel_names(channel_names: List[str]) edits \"custom_channel_names\" attribute of image in it's metadata.json delete_from_system(sess =", "work with in the frontend data : Any ___TO BE DONE: add custom", "''' kwargs = self.dict() # Only load the full image if not already", "- image_layer(app.api.classes_int.IntImageImageResultLayer): Layer to be selected as background layer. ''' layer_uid = image_layer.uid", "deleted. ''' layer = self.select_result_layer(layer_id) if layer_id == self.bg_layer_id: self.set_bg_false() layer.delete() self.refresh_from_db() def", "self.name = updated_info.name self.hint = updated_info.hint self.experiment_ids = updated_info.experiment_ids self.image_result_layers = updated_info.image_result_layers self.result_measurements", "''' image_array = self.data layer = self.select_result_layer(layer_id) measurement, measurement_summary = utils_results.calculate_measurement( image_array, layer.data)", "str): ''' Loads all classifiers of given type from database. Returns dictionary of", "List[DbResultMeasurement] = [] emtpy list by default. List of all associated DbResultMeasurement objects", "from database and returns DbImage object. update_hint(new_hint: str, sess = None): updates objects", "List[int], suffix: str): Fetches given layers and uses SimpleITKs STAPLE algorithm to estimate", "in db, then object attributes are reloaded from db. ''' db_image = self.to_db_class()", "== type(None): warnings.warn(\"Image could not be read!\") return None else: image_shape = self.data.shape", "sess=None): ''' Sets imagaes has_bg_layer property to False in database. Parameters: - sess(sqlalchemy.orm.Session):", "default. 
List of all associated IntResultMeasurement objects tags : Set[str]: empty set by", "measurement in self.measurements] return IntImage(**kwargs) def to_com_class(self): ''' Returns obect as com class.", "bool False by default. Indicates if image as layer selected as background_layer. bg_layer_id", "is passed default session will be used (app.api.dependencies.get_db). ''' sess = check_sess(sess) crud.update_image_bg_true(self.uid,", "returns DbImage object. update_hint(new_hint: str, sess = None): updates objects hint in database.", "db_image def set_bg_false(self): ''' Helper function to set has_bg_layer to False and bg_layer_id", "= updated_info.experiment_ids self.image_result_layers = updated_info.image_result_layers self.result_measurements = updated_info.result_measurements self.tags = updated_info.tags self.has_bg_layer =", "kwargs[\"image_result_layers\"] = [image_result_layer.to_int_class() for image_result_layer in self.image_result_layers] kwargs[\"result_measurements\"] = [measurement.to_int_class() for measurement in", "image_shape[-2], image_shape[-1])) _mask[:,...] = mask int_result_layer = IntImageResultLayer( uid=-1, name=f\"{path.name}\", hint=\"imported mask\", image_id=self.uid,", "import warnings import xml from pathlib import Path from typing import Any, List,", "= [result_layer.to_db_class() for result_layer in self.image_result_layers] db_result_measurements = [measurement.to_db_class() for measurement in self.result_measurements]", "str hint: str = \"\" has_bg_layer: bool = False bg_layer_id: Optional[int] path_metadata: Optional[Path]", "class DbImage(BaseModel): ''' A class to handle database and file storage of Images", "with shape (z,y,x). Parameters: - channel(int): index of channel to be selected. 
'''", ") int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) def add_layer_from_mask(self, path: Path): mask = utils_import.read_mask(path) if type(mask)", "passed default session will be used (app.api.dependencies.get_db) ''' sess = check_sess(sess) updated_db_image =", "path and id. ''' if self.uid == -1: db_image = self.to_db_class() db_image.create_in_db() self.uid", "image. - suffix(str): will be appended to layer name. ''' if suffix ==", "generated as image is saved to database. path_image: pathlib.Path, optional path to the", "None) sets \"has_bg_layer\" property to False in db. set_bg_true(layer_uid: int, sess = None)", "dims/dims.max() else: dims = None print(\"Couldn't calculate scaling from metadata, defaulting to None\")", "new_hint: str, sess=None): ''' This function expects a new hint as string and", "be used (app.api.dependencies.get_db). ''' sess = check_sess(sess) crud.update_image_bg_false(self.uid, sess) def set_bg_true(self, layer_uid: int,", "provided in metadata. ''' x = self.metadata['pixel_size_physical_x'] y = self.metadata['pixel_size_physical_y'] z = self.metadata['pixel_size_physical_z']", "''' sess = check_sess(sess) crud.delete_image(self, sess) class IntImage(BaseModel): ''' A class to handle", "is passed. update_channel_names(channel_names: List[str]) edits \"custom_channel_names\" attribute of image in it's metadata.json delete_from_system(sess", "to image, database and file storage. Parameters: - layer_id_list(List[int]): List of layer ids", "keywords to easily categorize objects in frontend. 
Methods ------- to_int_class()->app.api.classes_internal.IntImage: returns object as", "IntImageResultLayer( uid=-1, name=f\"{path.name}\", hint=\"imported maks\", image_id=self.uid, layer_type=\"labels\", data=mask ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) def", "> 1: dims = np.array([z, y, x]) dims = dims/dims.max() elif n_z ==", "import fileserver_requests as fsr from app.api import utils_import, utils_paths, utils_results from app.api.classes.image_result_layer import", "all associated files and db entries Parameters: - sess(sqlalchemy.orm.Session): The database session to", "the index of the image is stored here metadata : dict reduced metadata", "self.bg_layer_id bg_layer = self.select_result_layer(bg_uid) bg_mask = bg_layer.data else: bg_mask = np.zeros(( self.data.shape[0], self.data.shape[2],", "= [] tags: Set[str] = set() data: Any metadata_omexml: Any has_bg_layer: bool =", "copy from os import name import warnings import xml from pathlib import Path", "attributes accordingly. Uses default session if none is passed. refresh_from_db() -> DbImage Fetches", "dict fsr.save_json(self.metadata, db_image.path_metadata) # save metadata xml path_xml = utils_paths.make_metadata_xml_path_from_json_path( db_image.path_metadata) metadata_string =", "None. Attribute is changed in db, then object attributes are reloaded from db.", "self.refresh_from_db() def select_channel(self, channel: int): ''' Helper method expects channel index. Returns deep", "return dims def to_db_class(self): ''' Transforms internal class representation to db class representation.", "given value. 
Parameters: - layer_uid(int): uid of result layer to be used as", "sess = check_sess(sess) crud.delete_image(self, sess) class IntImage(BaseModel): ''' A class to handle calculations", "= [] tags: Set[str] = [] def to_int_class(self, for_refresh=False): ''' Returns object as", "the associated layers \\n{layers}\", UserWarning) return None def calculate_background(self): ''' Expects the bg_uid", "Fetches given layers and uses SimpleITKs STAPLE algorithm to estimate ground truth. Resulting", "bg_layer_id : int, optional None if no bg_layer selected, otherwise it holds the", "of all associated IntImageResultLayer objects. result_measurements : List[IntResultMeasurement] empty list by default. List", "kwargs[\"imageResultLayers\"] = [image_result_layer.to_com_class() for image_result_layer in self.image_result_layers] kwargs[\"measurements\"] = [measurement.to_com_class() for measurement in", "kwargs[\"hasBgLayer\"] = self.has_bg_layer kwargs[\"bgLayerId\"] = self.bg_layer_id kwargs[\"tags\"] = list(self.tags) return ComImage(**kwargs) def set_bg_false(self,", "''' Returns obect as com class. ''' kwargs = self.dict() kwargs[\"metadata\"] = utils_import.load_metadata_only(", "path in the process. to_com_class()->app.api.classes_com.ComImage: returns object as com_class. set_bg_false(sess = None) sets", "import copy from os import name import warnings import xml from pathlib import", "pathlib import Path from typing import Any, List, Optional, Set import numpy as", "-> np.array: Helper method expects channel index. Returns deep copy of channel with", "session will be used (app.api.dependencies.get_db). ''' sess = check_sess(sess) crud.update_image_bg_false(self.uid, sess) def set_bg_true(self,", "This function expects a new hint as string and calls crud.update_image_hint to update", "will be initialized as IntResultLayer. 
add_layer_from_roi(path) add_layer_from_mask(path) ''' uid: int name: str series_index:", "x]) dims = dims/dims.max() elif n_z == 1: dims = np.array([y, x]) dims", "= None) sets \"has_bg_layer\" property to False in db. set_bg_true(layer_uid: int, sess =", "Only load the full image if not already loaded if for_refresh == False:", "= db_image.refresh_from_db() self.name = updated_info.name self.hint = updated_info.hint self.experiment_ids = updated_info.experiment_ids self.image_result_layers =", "to set has_bg_layer to False and bg_layer_id to None. Attribute is changed in", "by SimpleITK's STAPLE probabilities. For ground truth estimation layer will be binarized, all", "to given value. create_in_db(sess = None): creates object in database, updates objects path", "dims = None print(\"Couldn't calculate scaling from metadata, defaulting to None\") return dims", "suffix == None: suffix = \"\" else: suffix = \"_\" + suffix label_array_list", "image_result_layers: List[DbImageResultLayer] = [] emtpy list by default. List of all associated DbImageResultLayer", "self.bg_layer_id = updated_info.bg_layer_id def delete_result_layer(self, layer_id: int): ''' Method to delete a result", "str): Fetches given layers and uses SimpleITKs STAPLE algorithm to estimate ground truth.", "background_layer. bg_layer_id : int, optional None if no bg_layer selected, otherwise it holds", "layer. bg_layer_id: int, optional None if no associated background layer, otherwise id of", "dict reduced metadata for easy use within mistos. As created by app.api.utils_import.acquire_metadata_dict (creates", "this image. - suffix(str): will be appended to layer name. ''' if suffix", "path_image: pathlib.Path, optional path to the images array \".zarr\" folder. Automatically generated as", "the images metadata \".json\". Automatically generated as image is saved to database. 
path_image:", "crud from app import fileserver_requests as fsr from app.api import utils_import, utils_paths, utils_results", "= bg_mask.sum() n_channel = self.data.shape[1] mean_pixel = [] for n in range(n_channel): channel_data", "utils_import.read_roi(path, self.data.shape) int_result_layer = IntImageResultLayer( uid=-1, name=f\"{path.name}\", hint=\"imported maks\", image_id=self.uid, layer_type=\"labels\", data=mask )", "and uid attributes accordingly. Uses default session if none is passed. refresh_from_db() ->", "saved as channel names. ''' metadata = fsr.load_json(self.path_metadata) metadata[\"custom_channel_names\"] = channel_names fsr.save_metadata(metadata, self.path_metadata)", "if clf_dict == {}: clf_dict[\"No classifers found\"] = None return clf_dict def refresh_from_db(self):", "bg_layer_id: Optional[int] path_metadata: Optional[Path] path_image: Optional[Path] image_result_layers: List[DbImageResultLayer] = [] measurements: List[DbResultMeasurement] =", "= updated_info.name self.hint = updated_info.hint self.experiment_ids = updated_info.experiment_ids self.image_result_layers = updated_info.image_result_layers self.result_measurements =", "get_image_scaling(): Returns dimensions normalized scales in array with shape (z,y,x) or None. to_db_class()", "calls crud.update_image_hint to update the image hint. Parameters: - new_hint(str): string to be", "file storage and the image. If layer was background_layer, corresponding attributes are reset.", "uid=-1, name=f\"{path.name}\", hint=\"imported mask\", image_id=self.uid, layer_type=\"labels\", data=_mask ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) else: warnings.warn(", "bg_layer_id: Optional[int] def on_init(self): ''' Method to initialize the object. Handles image as", "be turned to binary, assuming all labels > 0 to be background. Returns", "_mask[:,...] 
= mask int_result_layer = IntImageResultLayer( uid=-1, name=f\"{path.name}\", hint=\"imported mask\", image_id=self.uid, layer_type=\"labels\", data=_mask", "default session if none is passed. ''' uid: int series_index: int name: str", "image. Result layer will be turned to binary, assuming all labels > 0", "layer from database, file storage and the image. If layer was background_layer, corresponding", "image was imported has_bg_layer : bool False by default. Indicates if image as", "utils_results.calculate_measurement( image_array, layer.data) measurement_result = IntResultMeasurement( uid=-1, name=utils_paths.make_measurement_name(self.name, layer.name), hint=\"\", image_id=self.uid, result_layer_id=layer.uid, measurement=measurement,", "List[IntImageResultLayer] = [] result_measurements: List[IntResultMeasurement] = [] tags: Set[str] = set() data: Any", "None. to_db_class() -> app.api.classes_db.DbImage: Returns object as DbImage object. set_bg_false(): Helper function to", "layers = [_ for _ in self.image_result_layers if _.uid == uid] if len(layers)", "list by default. List of experiments_group ids which use the image. image_result_layers :", "in database which generates path and id. ''' if self.uid == -1: db_image", "labels > 0 to be background. Returns list of length n_channel with mean", "Parameters: - new_hint(str): string to be saved. - sess(sqlalchemy.orm.Session): The database session to", "int) -> app.api.classes_internal.IntResultLayer | None: Returns layer with corresponding id, returns None if", "imported via one file (image series), the index of the image is stored", "''' Helper method expects channel index. Returns deep copy of channel with shape", "session will be used (app.api.dependencies.get_db) ''' sess = check_sess(sess) sql_image = crud.create_image(self, sess)", "\"has_bg_layer\" property to False in db. 
set_bg_true(layer_uid: int, sess = None) sets \"has_bg_layer\"", "selected, otherwise it holds the bg_layer_id Methods ------- on_init(): Initializes object. Object is", ") int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) else: warnings.warn( f\"Mask shape {mask.shape} does not match image", "image array is not reloaded from file storage. ''' kwargs = self.dict() #", "db. Paths and id are generated and updated in object. Parameters: - sess(sqlalchemy.orm.Session):", "metadata_string = self.metadata_omexml.to_xml(encoding=\"utf-8\") metadata_omexml = self.metadata_omexml.toprettyxml(indent=\"\\t\") fsr.save_metadata_xml(metadata_omexml, path_xml) # save thumbnail thumbnail =", "refresh_from_db(self): ''' Requests current information from db and updates the object's attributes accordingly.", "= self.series_index kwargs[\"hasBgLayer\"] = self.has_bg_layer kwargs[\"bgLayerId\"] = self.bg_layer_id kwargs[\"tags\"] = list(self.tags) return ComImage(**kwargs)", "holds the bg_layer_id Methods ------- on_init(): Initializes object. Object is saved in database", "len(layers) > 0: return layers[0] else: warnings.warn( f\"IntImage.select channel could not select layer", "in frontend. Methods ------- to_int_class()->app.api.classes_internal.IntImage: returns object as int_class. Loads layer array from", "the images array \".zarr\" folder. Automatically generated as image is saved to database.", "image is stored. Is loaded from .zarr files, most numpy operations work, some", "tags: Set[str] = set() data: Any metadata_omexml: Any has_bg_layer: bool = False bg_layer_id:", "in which the image is stored. Is loaded from .zarr files, most numpy", "associated DbImageResultLayer objects measurements: List[DbResultMeasurement] = [] emtpy list by default. 
List of", "calculate scaling from metadata, defaulting to None\") return dims def to_db_class(self): ''' Transforms", "the objects unique identifier series_index : int index of image if multiple images", "= utils_paths.make_metadata_xml_path_from_json_path( db_image.path_metadata) metadata_string = self.metadata_omexml.to_xml(encoding=\"utf-8\") metadata_string = xml.dom.minidom.parseString( metadata_string).toprettyxml(indent=\"\\t\") fsr.save_metadata_xml(metadata_string, path_xml) elif", "with in the frontend data : Any ___TO BE DONE: add custom field", "the object has_bg_layer: bool = False indicator if image has an associated background", "load the full image if not already loaded if for_refresh == False: data", "saved. - sess(sqlalchemy.orm.Session): The database session to be used, if no session is", "select layer with id {uid}.\\nThis image has the associated layers \\n{layers}\", UserWarning) return", "import Path class DbImage(BaseModel): ''' A class to handle database and file storage", "def calculate_background(self): ''' Expects the bg_uid to belong to a result layer of", "and file storage of Images Attributes ---------- uid : int the objects unique", "''' Method to set layer as background layer. Parameters: - image_layer(app.api.classes_int.IntImageImageResultLayer): Layer to", "int): Deletes the layer from database, file storage and the image. If layer", "[] measurements: List[DbResultMeasurement] = [] tags: Set[str] = [] def to_int_class(self, for_refresh=False): '''", "set() data: Any metadata_omexml: Any has_bg_layer: bool = False bg_layer_id: Optional[int] def on_init(self):", "expects channel index. Returns deep copy of channel with shape (z,y,x). select_result_layer(uid: int)", "hint : str, optional empty string by default. 
brief description of the object", "data=mask ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) elif mask.shape == (1, image_shape[-2], image_shape[-1]): _mask =", "id is not found in self.image_result_layers calculate_background() -> List: Returns list of length", "metadata_string).toprettyxml(indent=\"\\t\") fsr.save_metadata_xml(metadata_string, path_xml) elif self.uid == -2: db_image = self.to_db_class() db_image.create_in_db() self.uid =", "def refresh_from_db(self): ''' Requests current information from db and updates the object's attributes", "unique identifier name : str the objects name series_index : int if multiple", "in self.measurements] return IntImage(**kwargs) def to_com_class(self): ''' Returns obect as com class. '''", "kwargs[\"metadata\"] = utils_import.load_metadata_only( self.path_metadata) kwargs[\"imageResultLayers\"] = [image_result_layer.to_com_class() for image_result_layer in self.image_result_layers] kwargs[\"measurements\"] =", "as read by bioformats import when image was imported has_bg_layer : bool False", "Returns layer with corresponding id, returns None if id is not found in", "hint=\"imported mask\", image_id=self.uid, layer_type=\"labels\", data=mask ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) elif mask.shape == (1,", "and updates the objects attributes. delete_result_layer(layer_id: int): Deletes the layer from database, file", "return utils_paths.fileserver.joinpath(utils_paths.make_thumbnail_path(self.uid)) def get_image_scaling(self): ''' Reads pixel dimensions and returns relative dimensions. Returns", "sets \"has_bg_layer\" property to True in db. sets bg_layer_id to given value. create_in_db(sess", "be used, if no session is passed default session will be used (app.api.dependencies.get_db)", "to_db_class() -> app.api.classes_db.DbImage: Returns object as DbImage object. 
set_bg_false(): Helper function to set", "= self.metadata['pixel_size_physical_z'] n_z = self.metadata['pixel_size_z'] if n_z > 1: dims = np.array([z, y,", "DbImage(BaseModel): ''' A class to handle database and file storage of Images Attributes", "and updates the object's attributes accordingly. Does not reload image data again '''", "and uses SimpleITKs STAPLE algorithm to estimate ground truth. Resulting layer will be", "= updated_info.tags self.has_bg_layer = updated_info.has_bg_layer self.bg_layer_id = updated_info.bg_layer_id def delete_result_layer(self, layer_id: int): '''", "from db. Parameters: - sess(sqlalchemy.orm.Session): The database session to be used, if no", "if for_refresh == False: data = fsr.load_zarr(kwargs[\"path_image\"]) kwargs[\"data\"] = data else: kwargs[\"data\"] =", "default. Set of keywords to work with in the frontend data : Any", "algorithm to estimate ground truth. Resulting layer will be initialized as IntResultLayer. add_layer_from_roi(path)", "to db class representation. ''' db_image_result_layers = [result_layer.to_db_class() for result_layer in self.image_result_layers] db_result_measurements", "save metadata dict fsr.save_json(self.metadata, db_image.path_metadata) # save metadata xml path_xml = utils_paths.make_metadata_xml_path_from_json_path( db_image.path_metadata)", "= np.where(bg_mask > 0, 1, 0) n_pixel = bg_mask.sum() n_channel = self.data.shape[1] mean_pixel", "image as new image if \"uid\" == -1 and as imported Mistos image", "for measurement in self.measurements] return IntImage(**kwargs) def to_com_class(self): ''' Returns obect as com", "(image series), the index of the image is stored here metadata : dict", "None. 
set_bg_true(image_layer: app.api.classes_int.IntImageResultLayer): Method to set layer as background layer select_channel(channel: int) ->", "For ground truth estimation layer will be binarized, all labels > 0 will", "fileserver path and joins with return value from utils_paths.make_thumbnail_path Returns path as pathlib.Path", "image_shape[-2], image_shape[-1]): _mask = np.zeros((image_shape[0], image_shape[-2], image_shape[-1])) _mask[:,...] = mask int_result_layer = IntImageResultLayer(", "This function expects a new channel names as list of strings. opens metadata.json", "name=f\"{path.name}\", hint=\"imported mask\", image_id=self.uid, layer_type=\"labels\", data=_mask ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) else: warnings.warn( f\"Mask", "self.dict() # Only load the full image if not already loaded if for_refresh", "is passed. refresh_from_db() -> DbImage Fetches image from database and returns DbImage object.", "Method to set layer as background layer select_channel(channel: int) -> np.array: Helper method", "None): ''' Method to estimate ground truth from multiple layers with by SimpleITK's", "self.image_result_layers calculate_background() -> List: Returns list of length n_channels. List holds the mean", "updates objects path and uid attributes accordingly. Uses default session if none is", "value. Parameters: - layer_uid(int): uid of result layer to be used as background", ": str the objects name hint : str = \"\" empty string by", "from app.api.dependencies import check_sess from pydantic import BaseModel, constr from pathlib import Path", "then object attributes are reloaded from db. ''' db_image = self.to_db_class() db_image.set_bg_false() self.refresh_from_db()", "os import name import warnings import xml from pathlib import Path from typing", "attributes are reset. 
estimate_ground_truth_layer(layer_id_list: List[int], suffix: str): Fetches given layers and uses SimpleITKs", "= [measurement.to_int_class() for measurement in self.measurements] return IntImage(**kwargs) def to_com_class(self): ''' Returns obect", "Any, List, Optional, Set import numpy as np from app import crud from", "= check_sess(sess) crud.delete_image(self, sess) class IntImage(BaseModel): ''' A class to handle calculations and", "ground_truth_estimation_array = utils_results.staple_gte( label_array_list) hint = f\"Following Label Layers were used to estimate", "image_layer: IntImageResultLayer): ''' Method to set layer as background layer. Parameters: - image_layer(app.api.classes_int.IntImageImageResultLayer):", "Uses default session if none is passed. refresh_from_db() -> DbImage Fetches image from", "sql_image = crud.create_image(self, sess) self.uid = sql_image.id self.path_image = Path(sql_image.path_image) self.path_metadata = Path(sql_image.path_metadata)", "int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) def add_layer_from_mask(self, path: Path): mask = utils_import.read_mask(path) if type(mask) ==", "from pydantic import BaseModel, constr from pathlib import Path class DbImage(BaseModel): ''' A", "channel(int): index of channel to be selected. ''' channel_data = copy.deepcopy(self.data[:, channel, ...])[", "add_layer_from_roi(self, path: Path): mask = utils_import.read_roi(path, self.data.shape) int_result_layer = IntImageResultLayer( uid=-1, name=f\"{path.name}\", hint=\"imported", "0, 1, 0) n_pixel = bg_mask.sum() n_channel = self.data.shape[1] mean_pixel = [] for", "object as int_class. Loads layer array from file path in the process. 
to_com_class()->app.api.classes_com.ComImage:", "image_id=self.uid, layer_type=\"labels\", data=_mask ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) else: warnings.warn( f\"Mask shape {mask.shape} does", "bool = False indicator if image has an associated background layer. bg_layer_id: int,", "calculations and other internal operations with images. Attributes ---------- uid : int the", ":, np.newaxis, ...] return channel_data def select_result_layer(self, uid: int): layers = [_ for", "return ComImage(**kwargs) def set_bg_false(self, sess=None): ''' Sets imagaes has_bg_layer property to False in", "as imported Mistos image if \"uid\" == -2. Creates image in database which", "hint = f\"Following Label Layers were used to estimate the ground truth: {layer_id_list}\"", "of this image from db and updates the objects attributes. delete_result_layer(layer_id: int): Deletes", "database, file storage and the image. If layer was background_layer, corresponding attributes are", "a new channel names as list of strings. opens metadata.json and edits custom_channel_names", "-> List: Returns list of length n_channels. List holds the mean pixel values", "to belong to a result layer of this image. 
Result layer will be", "''' # Fetches dict in form {name: id} clf_dict = crud.read_classifier_dict_by_type(clf_type) if clf_dict", "check_sess from pydantic import BaseModel, constr from pathlib import Path class DbImage(BaseModel): '''", "metadata kwargs[\"image_result_layers\"] = [image_result_layer.to_int_class() for image_result_layer in self.image_result_layers] kwargs[\"result_measurements\"] = [measurement.to_int_class() for measurement", "= utils_paths.make_metadata_xml_path_from_json_path( db_image.path_metadata) # metadata_string = self.metadata_omexml.to_xml(encoding=\"utf-8\") metadata_omexml = self.metadata_omexml.toprettyxml(indent=\"\\t\") fsr.save_metadata_xml(metadata_omexml, path_xml) #", "clf_type(str): Valid classifier type, for available types see app.api.cfg_classes.classifier_types. ''' # Fetches dict", "dictionary of format {\"UID_NAME\": UID} refresh_from_db(): Fetches data of this image from db", "folder. Automatically generated as image is saved to database. image_result_layers: List[DbImageResultLayer] = []", "metadata.json delete_from_system(sess = None): deletes object in database and file storage. Uses default", "passed. refresh_from_db() -> DbImage Fetches image from database and returns DbImage object. update_hint(new_hint:", "= None): deletes object in database and file storage. Uses default session if", ".zarr files, most numpy operations work, some may cause trouble. metadata_omexml : Any", "return updated_db_image def update_hint(self, new_hint: str, sess=None): ''' This function expects a new", "images metadata \".json\". Automatically generated as image is saved to database. path_image: pathlib.Path,", "for_refresh == False: data = fsr.load_zarr(kwargs[\"path_image\"]) kwargs[\"data\"] = data else: kwargs[\"data\"] = None", "to the images array \".zarr\" folder. Automatically generated as image is saved to", "(app.api.dependencies.get_db). 
''' sess = check_sess(sess) crud.update_image_bg_true(self.uid, layer_uid, sess) def create_in_db(self, sess=None): ''' Creates", "joins with return value from utils_paths.make_thumbnail_path Returns path as pathlib.Path ''' return utils_paths.fileserver.joinpath(utils_paths.make_thumbnail_path(self.uid))", "storage. Parameters: - layer_id_list(List[int]): List of layer ids to be used for ground", "of length n_channel with mean intensity of measured pixels. ''' if self.has_bg_layer: bg_uid", "# save zarr fsr.save_zarr(self.data, db_image.path_image) # save metadata dict fsr.save_json(self.metadata, db_image.path_metadata) # save", "hint: Optional[str] = \"\" experiment_ids: List[int] = [] image_result_layers: List[IntImageResultLayer] = [] result_measurements:", "uid : int the objects unique identifier series_index : int index of image", ": int index of image if multiple images were imported in a single", "> 0 will be unified and represented as foreground (==1) for calculation. Saves", "to set has_bg_layer to False and bg_layer_id to None. set_bg_true(image_layer: app.api.classes_int.IntImageResultLayer): Method to", "frontend. Methods ------- to_int_class()->app.api.classes_internal.IntImage: returns object as int_class. Loads layer array from file", "\"uid\" == -2. Creates image in database which generates path and id. '''", "will be used (app.api.dependencies.get_db). ''' sess = check_sess(sess) crud.update_image_bg_true(self.uid, layer_uid, sess) def create_in_db(self,", "db_image.uid original_filename = self.metadata[\"original_filename\"] self.metadata = self.metadata[\"images\"][self.series_index] self.metadata[\"original_filename\"] = original_filename # save zarr", "object attributes are reloaded from db. 
''' db_image = self.to_db_class() db_image.set_bg_false() self.refresh_from_db() def", "updated_info.has_bg_layer self.bg_layer_id = updated_info.bg_layer_id def delete_result_layer(self, layer_id: int): ''' Method to delete a", "to easily categorize objects in frontend. Methods ------- to_int_class()->app.api.classes_internal.IntImage: returns object as int_class.", "''' sess = check_sess(sess) updated_db_image = crud.read_image_by_uid( self.uid, sess, for_refresh=True) return updated_db_image def", "int metadata: dict hint: Optional[str] = \"\" experiment_ids: List[int] = [] image_result_layers: List[IntImageResultLayer]", "database and file storage get_thumbnail_path(): Helper function which returns path to the thumbnail", "= [_ for _ in self.image_result_layers if _.uid == uid] if len(layers) >", "as background layer. - sess(sqlalchemy.orm.Session): The database session to be used, if no", "to db and file storage. get_classifiers(clf_type: str) -> dict: Fetches all saved classifiers", "metadata_omexml = self.metadata_omexml.toprettyxml(indent=\"\\t\") fsr.save_metadata_xml(metadata_omexml, path_xml) # save thumbnail thumbnail = utils_import.generate_thumbnail(self.data) thumbnail_path =", "to the thumbnail on fileserver. Gets gets fileserver path and joins with return", "image. If layer was background_layer, corresponding attributes are reset. estimate_ground_truth_layer(layer_id_list: List[int], suffix: str):", "emtpy list by default. List of all associated DbImageResultLayer objects measurements: List[DbResultMeasurement] =", "metadata: dict hint: Optional[str] = \"\" experiment_ids: List[int] = [] image_result_layers: List[IntImageResultLayer] =", "of all associated IntResultMeasurement objects tags : Set[str]: empty set by default. 
Set", "= DbImage( uid=self.uid, series_index=self.series_index, name=self.name, hint=self.hint, path_metadata=None, path_image=None, has_bg_layer=self.has_bg_layer, bg_layer_id=self.bg_layer_id, experiment_ids=self.experiment_ids, image_result_layers=db_image_result_layers, result_measurements=db_result_measurements,", "Parameters: - layer_id(int): Id of result layer to be deleted. ''' layer =", "= db_image.uid print(f\"Importing archived Mistos image with id {self.uid}\") fsr.save_zarr(self.data, db_image.path_image) fsr.save_json(self.metadata, db_image.path_metadata)", "self.data.shape[3] )) if bg_mask.max() < 2: bg_mask = np.where(bg_mask > 0, 1, 0)", "app.api.utils_import.acquire_metadata_dict (creates metadata dict for whole series) Series metadatadict is passed into IntImage.on_init().", "database and file storage of Images Attributes ---------- uid : int the objects", "of strings to be saved as channel names. ''' metadata = fsr.load_json(self.path_metadata) metadata[\"custom_channel_names\"]", "def delete_from_system(self, sess=None): ''' calls crud.delete_image and passed db_image object to delete all", "db and file storage) Returns IntResultMeasurement object: measurement.measurement has shape: (n_labels, n_channel, n_features),", "in db. sets bg_layer_id to given value. create_in_db(sess = None): creates object in", "image_shape[-1])) _mask[:,...] = mask int_result_layer = IntImageResultLayer( uid=-1, name=f\"{path.name}\", hint=\"imported mask\", image_id=self.uid, layer_type=\"labels\",", "selected as background_layer. 
bg_layer_id : int, optional None if no bg_layer selected, otherwise", "series_index: int metadata: dict hint: Optional[str] = \"\" experiment_ids: List[int] = [] image_result_layers:", "= utils_paths.make_metadata_xml_path_from_json_path( self.path_metadata) kwargs[\"metadata_omexml\"] = fsr.load_metadata_xml(metadata_omexml_path) del kwargs[\"path_metadata\"] del kwargs[\"path_image\"] kwargs[\"metadata\"] = metadata", "Returns dimensions normalized scales in array with shape (z,y,x) or None if no", "Method to measure mask and save result as ResultMeasurement. Creates measurement object and", "be saved as channel names. ''' metadata = fsr.load_json(self.path_metadata) metadata[\"custom_channel_names\"] = channel_names fsr.save_metadata(metadata,", "objects tags: Set[str] = [] set of string keywords to easily categorize objects", "type, for available types see app.api.cfg_classes.classifier_types. ''' # Fetches dict in form {name:", "associated IntResultMeasurement objects tags : Set[str]: empty set by default. Set of keywords", "IntResultMeasurement object: measurement.measurement has shape: (n_labels, n_channel, n_features), n_features == 2 (n_pixels, sum_pixels)", "IntImage.on_init(). Thereafter, only image's metadata will be saved to .json and loaded. hint", "measurement in self.result_measurements] db_image = DbImage( uid=self.uid, series_index=self.series_index, name=self.name, hint=self.hint, path_metadata=None, path_image=None, has_bg_layer=self.has_bg_layer,", "numpy operations work, some may cause trouble. metadata_omexml : Any original metadata xml", "update the image hint. Parameters: - new_hint(str): string to be saved. 
- sess(sqlalchemy.orm.Session):", "id} clf_dict = crud.read_classifier_dict_by_type(clf_type) if clf_dict == {}: clf_dict[\"No classifers found\"] = None", "= [image_result_layer.to_int_class() for image_result_layer in self.image_result_layers] kwargs[\"result_measurements\"] = [measurement.to_int_class() for measurement in self.measurements]", "no associated background layer, otherwise id of the background layer. path_metadata: pathlib.Path, optional", "metadata_omexml: Any has_bg_layer: bool = False bg_layer_id: Optional[int] def on_init(self): ''' Method to", "database and returns DbImage object. update_hint(new_hint: str, sess = None): updates objects hint", "None): creates object in database, updates objects path and uid attributes accordingly. Uses", "int, sess=None): ''' Sets images bg_layer_id property to given value. Parameters: - layer_uid(int):", "defined. measure_mask_in_image(layer_id: int) -> app.api.classes_int.IntMeasurementResult: Returns measurement object for given result layer and", "== -2: db_image = self.to_db_class() db_image.create_in_db() self.uid = db_image.uid print(f\"Importing archived Mistos image", "objects. result_measurements : List[IntResultMeasurement] empty list by default. List of all associated IntResultMeasurement", "layer name. ''' if suffix == None: suffix = \"\" else: suffix =", "storage and the image. If layer was background_layer, corresponding attributes are reset. estimate_ground_truth_layer(layer_id_list:", "numpy as np from app import crud from app import fileserver_requests as fsr", "image as layer selected as background_layer. bg_layer_id : int, optional None if no", "with shape (z,y,x). 
select_result_layer(uid: int) -> app.api.classes_internal.IntResultLayer | None: Returns layer with corresponding", "to estimate the ground truth: {layer_id_list}\" int_result_layer = IntImageResultLayer( uid=-1, name=f\"ground_truth_estimation{suffix}\", hint=hint, image_id=self.uid,", "else: kwargs[\"data\"] = None metadata = fsr.load_json(self.path_metadata) metadata_omexml_path = utils_paths.make_metadata_xml_path_from_json_path( self.path_metadata) kwargs[\"metadata_omexml\"] =", "hint=\"imported maks\", image_id=self.uid, layer_type=\"labels\", data=mask ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) def add_layer_from_mask(self, path: Path):", "db, then object attributes are reloaded from db. ''' db_image = self.to_db_class() db_image.set_bg_false()", "in self.image_result_layers] db_result_measurements = [measurement.to_db_class() for measurement in self.result_measurements] db_image = DbImage( uid=self.uid,", "empty set by default. Set of keywords to work with in the frontend", "# metadata_string = self.metadata_omexml.to_xml(encoding=\"utf-8\") metadata_omexml = self.metadata_omexml.toprettyxml(indent=\"\\t\") fsr.save_metadata_xml(metadata_omexml, path_xml) # save thumbnail thumbnail", "print(\"Couldn't calculate scaling from metadata, defaulting to None\") return dims def to_db_class(self): '''", "to layer name. ''' if suffix == None: suffix = \"\" else: suffix", "self.measurements] return IntImage(**kwargs) def to_com_class(self): ''' Returns obect as com class. ''' kwargs", "self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) def add_layer_from_mask(self, path: Path): mask = utils_import.read_mask(path) if type(mask) == type(None):", "of result layer to be used as background layer. - sess(sqlalchemy.orm.Session): The database", "storage. 
''' kwargs = self.dict() # Only load the full image if not", "name=utils_paths.make_measurement_name(self.name, layer.name), hint=\"\", image_id=self.uid, result_layer_id=layer.uid, measurement=measurement, measurement_summary=measurement_summary ) measurement_result.on_init() self.refresh_from_db() return measurement_result def", "of channel to be selected. ''' channel_data = copy.deepcopy(self.data[:, channel, ...])[ :, np.newaxis,", "layer. path_metadata: pathlib.Path, optional path to the images metadata \".json\". Automatically generated as", "<reponame>Maddonix/mistos_2 import copy from os import name import warnings import xml from pathlib", "layers and uses SimpleITKs STAPLE algorithm to estimate ground truth. Resulting layer will", "is saved in database and file storage get_thumbnail_path(): Helper function which returns path", "utils_paths.make_thumbnail_path Returns path as pathlib.Path ''' return utils_paths.fileserver.joinpath(utils_paths.make_thumbnail_path(self.uid)) def get_image_scaling(self): ''' Reads pixel", "default. List of experiments_group ids which use the image. image_result_layers : List[IntImageResultLayer] empty", "has_bg_layer to False and bg_layer_id to None. set_bg_true(image_layer: app.api.classes_int.IntImageResultLayer): Method to set layer", "Series metadatadict is passed into IntImage.on_init(). Thereafter, only image's metadata will be saved", "self.metadata['pixel_size_physical_y'] z = self.metadata['pixel_size_physical_z'] n_z = self.metadata['pixel_size_z'] if n_z > 1: dims =", "Id of result layer to be deleted. ''' layer = self.select_result_layer(layer_id) if layer_id", "database and file storage. Parameters: - layer_id_list(List[int]): List of layer ids to be", "default session if none is passed. 
refresh_from_db() -> DbImage Fetches image from database", "path as pathlib.Path ''' return utils_paths.fileserver.joinpath(utils_paths.make_thumbnail_path(self.uid)) def get_image_scaling(self): ''' Reads pixel dimensions and", "int the objects unique identifier name : str the objects name series_index :", "self.result_measurements] db_image = DbImage( uid=self.uid, series_index=self.series_index, name=self.name, hint=self.hint, path_metadata=None, path_image=None, has_bg_layer=self.has_bg_layer, bg_layer_id=self.bg_layer_id, experiment_ids=self.experiment_ids,", "to measure mask and save result as ResultMeasurement. Creates measurement object and initializes", "id are generated and updated in object. Parameters: - sess(sqlalchemy.orm.Session): The database session", "be read!\") return None else: image_shape = self.data.shape if mask.shape == (image_shape[0], image_shape[-2],", "layer by uid. If result layer is selected as background layer, the attributes", "int): ''' Method to delete a result layer by uid. If result layer", "the objects name series_index : int if multiple images are imported via one", "path and joins with return value from utils_paths.make_thumbnail_path Returns path as pathlib.Path '''", "from database. Returns dictionary of format {\"UID_NAME\": UID}. Mainly for use in napari", "as image is saved to database. image_result_layers: List[DbImageResultLayer] = [] emtpy list by", "Parameters: - layer_uid(int): uid of result layer to be used as background layer.", "metadata, defaulting to None\") return dims def to_db_class(self): ''' Transforms internal class representation", "Automatically generated as image is saved to database. 
image_result_layers: List[DbImageResultLayer] = [] emtpy", "utils_import.load_metadata_only( self.path_metadata) kwargs[\"imageResultLayers\"] = [image_result_layer.to_com_class() for image_result_layer in self.image_result_layers] kwargs[\"measurements\"] = [measurement.to_com_class() for", "available types see app.api.cfg_classes.classifier_types. ''' # Fetches dict in form {name: id} clf_dict", "the process. to_com_class()->app.api.classes_com.ComImage: returns object as com_class. set_bg_false(sess = None) sets \"has_bg_layer\" property", "image_result_layer in self.image_result_layers] kwargs[\"result_measurements\"] = [measurement.to_int_class() for measurement in self.measurements] return IntImage(**kwargs) def", "set_bg_true(layer_uid: int, sess = None) sets \"has_bg_layer\" property to True in db. sets", ": List[IntImageResultLayer] empty list by default. List of all associated IntImageResultLayer objects. result_measurements", "form {name: id} clf_dict = crud.read_classifier_dict_by_type(clf_type) if clf_dict == {}: clf_dict[\"No classifers found\"]", "has_bg_layer to False and bg_layer_id to None. Attribute is changed in db, then", "def refresh_from_db(self, sess=None): ''' Refreshes object image from db. Parameters: - sess(sqlalchemy.orm.Session): The", "channel with shape (z,y,x). Parameters: - channel(int): index of channel to be selected.", "image_id=self.uid, layer_type=\"labels\", data=mask ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) elif mask.shape == (1, image_shape[-2], image_shape[-1]):", "List[DbImageResultLayer] = [] measurements: List[DbResultMeasurement] = [] tags: Set[str] = [] def to_int_class(self,", "series_index : int if multiple images are imported via one file (image series),", "saved to .json and loaded. hint : str, optional empty string by default.", "If True, image array is not reloaded from file storage. ''' kwargs =", "channel index. 
Returns deep copy of channel with shape (z,y,x). Parameters: - channel(int):", "layer, otherwise id of the background layer. path_metadata: pathlib.Path, optional path to the", "self.to_db_class() updated_info = db_image.refresh_from_db() self.name = updated_info.name self.hint = updated_info.hint self.experiment_ids = updated_info.experiment_ids", "True, image array is not reloaded from file storage. ''' kwargs = self.dict()", "update_channel_names(self, channel_names: List[str]): ''' This function expects a new channel names as list", "sess = None) sets \"has_bg_layer\" property to True in db. sets bg_layer_id to", "List[str]): ''' This function expects a new channel names as list of strings.", "''' sess = check_sess(sess) crud.update_image_bg_false(self.uid, sess) def set_bg_true(self, layer_uid: int, sess=None): ''' Sets", "unique identifier series_index : int index of image if multiple images were imported", "[] def to_int_class(self, for_refresh=False): ''' Returns object as int class. Parameters: - for_refresh(bool", "class to handle calculations and other internal operations with images. Attributes ---------- uid", "= selection.sum()/n_pixel mean_pixel.append(_mean) return mean_pixel def measure_mask_in_image(self, layer_id: int): ''' Method to measure", "get_thumbnail_path(): Helper function which returns path to the thumbnail on fileserver. get_image_scaling(): Returns", "the thumbnail on fileserver. Gets gets fileserver path and joins with return value", "sess=None): ''' Sets images bg_layer_id property to given value. 
Parameters: - layer_uid(int): uid", "uid=-1, name=f\"ground_truth_estimation{suffix}\", hint=hint, image_id=self.uid, layer_type=\"labels\", data=ground_truth_estimation_array ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) def add_layer_from_roi(self, path:", "in it's metadata.json delete_from_system(sess = None): deletes object in database and file storage.", "-> app.api.classes_int.IntMeasurementResult: Returns measurement object for given result layer and saves it to", "metadata. ''' x = self.metadata['pixel_size_physical_x'] y = self.metadata['pixel_size_physical_y'] z = self.metadata['pixel_size_physical_z'] n_z =", "the image hint. Parameters: - new_hint(str): string to be saved. - sess(sqlalchemy.orm.Session): The", "List[int] empty list by default. List of experiments_group ids which use the image.", "will be used (app.api.dependencies.get_db) ''' sess = check_sess(sess) sql_image = crud.create_image(self, sess) self.uid", "== (1, image_shape[-2], image_shape[-1]): _mask = np.zeros((image_shape[0], image_shape[-2], image_shape[-1])) _mask[:,...] = mask int_result_layer", "a new hint as string and calls crud.update_image_hint to update the image hint.", "loaded if for_refresh == False: data = fsr.load_zarr(kwargs[\"path_image\"]) kwargs[\"data\"] = data else: kwargs[\"data\"]", "passed default session will be used (app.api.dependencies.get_db). ''' sess = check_sess(sess) crud.update_image_bg_true(self.uid, layer_uid,", "db_image.path_metadata) metadata_string = self.metadata_omexml.to_xml(encoding=\"utf-8\") metadata_string = xml.dom.minidom.parseString( metadata_string).toprettyxml(indent=\"\\t\") fsr.save_metadata_xml(metadata_string, path_xml) elif self.uid ==", "represented as foreground (==1) for calculation. Saves label layer to image, database and", "of result layer to be deleted. 
''' layer = self.select_result_layer(layer_id) if layer_id ==", "___TO BE DONE: add custom field type___ array of shape (z,c,y,x) in which", "measurement=measurement, measurement_summary=measurement_summary ) measurement_result.on_init() self.refresh_from_db() return measurement_result def get_classifiers(self, clf_type: str): ''' Loads", "if multiple images are imported via one file (image series), the index of", "self.image_result_layers] kwargs[\"result_measurements\"] = [measurement.to_int_class() for measurement in self.measurements] return IntImage(**kwargs) def to_com_class(self): '''", "DbImage object. update_hint(new_hint: str, sess = None): updates objects hint in database. Uses", "= None return clf_dict def refresh_from_db(self): ''' Requests current information from db and", "= utils_import.load_metadata_only( self.path_metadata) kwargs[\"imageResultLayers\"] = [image_result_layer.to_com_class() for image_result_layer in self.image_result_layers] kwargs[\"measurements\"] = [measurement.to_com_class()", "self.path_metadata = Path(sql_image.path_metadata) def refresh_from_db(self, sess=None): ''' Refreshes object image from db. Parameters:", "object has_bg_layer: bool = False indicator if image has an associated background layer.", "(DbImageResultLayer, IntImageResultLayer) from app.api.classes.result_measurement import (DbResultMeasurement, IntResultMeasurement) from app.api.classes_com import ComImage from app.api.dependencies", "kwargs[\"path_metadata\"] del kwargs[\"path_image\"] kwargs[\"metadata\"] = metadata kwargs[\"image_result_layers\"] = [image_result_layer.to_int_class() for image_result_layer in self.image_result_layers]", "estimate_ground_truth_layer(self, layer_id_list: List[int], suffix: str = None): ''' Method to estimate ground truth", "== 1: dims = np.array([y, x]) dims = dims/dims.max() else: dims = None", "sets \"has_bg_layer\" property to False in db. 
set_bg_true(layer_uid: int, sess = None) sets", "def create_in_db(self, sess=None): ''' Creates object in db. Paths and id are generated", "utils_paths, utils_results from app.api.classes.image_result_layer import (DbImageResultLayer, IntImageResultLayer) from app.api.classes.result_measurement import (DbResultMeasurement, IntResultMeasurement) from", "(app.api.dependencies.get_db) ''' sess = check_sess(sess) crud.delete_image(self, sess) class IntImage(BaseModel): ''' A class to", "all saved classifiers from db, filters for type and returns dictionary of format", "image_shape[-1]): int_result_layer = IntImageResultLayer( uid=-1, name=f\"{path.name}\", hint=\"imported mask\", image_id=self.uid, layer_type=\"labels\", data=mask ) int_result_layer.on_init()", "pathlib import Path class DbImage(BaseModel): ''' A class to handle database and file", "self.data.shape[1] mean_pixel = [] for n in range(n_channel): channel_data = self.select_channel(n) selection =", "else: bg_mask = np.zeros(( self.data.shape[0], self.data.shape[2], self.data.shape[3] )) if bg_mask.max() < 2: bg_mask", "return db_image def set_bg_false(self): ''' Helper function to set has_bg_layer to False and", "type and returns dictionary of format {\"UID_NAME\": UID} refresh_from_db(): Fetches data of this", "pixel dimensions and returns relative dimensions. Returns dimensions normalized scales in array with", "the layer to be measured ''' image_array = self.data layer = self.select_result_layer(layer_id) measurement,", "set of string keywords to easily categorize objects in frontend. Methods ------- to_int_class()->app.api.classes_internal.IntImage:", "False indicator if image has an associated background layer. 
bg_layer_id: int, optional None", "by app.api.utils_import.acquire_metadata_dict (creates metadata dict for whole series) Series metadatadict is passed into", "''' db_image_result_layers = [result_layer.to_db_class() for result_layer in self.image_result_layers] db_result_measurements = [measurement.to_db_class() for measurement", "self.tags = updated_info.tags self.has_bg_layer = updated_info.has_bg_layer self.bg_layer_id = updated_info.bg_layer_id def delete_result_layer(self, layer_id: int):", "Methods ------- on_init(): Initializes object. Object is saved in database and file storage", "Parameters: - image_layer(app.api.classes_int.IntImageImageResultLayer): Layer to be selected as background layer. ''' layer_uid =", "background layer select_channel(channel: int) -> np.array: Helper method expects channel index. Returns deep", "\\n{layers}\", UserWarning) return None def calculate_background(self): ''' Expects the bg_uid to belong to", "if none is passed. refresh_from_db() -> DbImage Fetches image from database and returns", "archived Mistos image with id {self.uid}\") fsr.save_zarr(self.data, db_image.path_image) fsr.save_json(self.metadata, db_image.path_metadata) path_xml = utils_paths.make_metadata_xml_path_from_json_path(", "- layer_uid(int): uid of result layer to be used as background layer. -", "sess=None): ''' calls crud.delete_image and passed db_image object to delete all associated files", "and loaded. hint : str, optional empty string by default. brief description of", "elif n_z == 1: dims = np.array([y, x]) dims = dims/dims.max() else: dims", "''' sess = check_sess(sess) crud.update_image_bg_true(self.uid, layer_uid, sess) def create_in_db(self, sess=None): ''' Creates object", "for whole series) Series metadatadict is passed into IntImage.on_init(). Thereafter, only image's metadata", "optional empty string by default. brief description of the object experiment_ids: List[int] empty", "to estimate ground truth. 
Resulting layer will be initialized as IntResultLayer. add_layer_from_roi(path) add_layer_from_mask(path)", "A class to handle database and file storage of Images Attributes ---------- uid", "> 0, 1, 0) n_pixel = bg_mask.sum() n_channel = self.data.shape[1] mean_pixel = []", "= crud.create_image(self, sess) self.uid = sql_image.id self.path_image = Path(sql_image.path_image) self.path_metadata = Path(sql_image.path_metadata) def", "= f\"Following Label Layers were used to estimate the ground truth: {layer_id_list}\" int_result_layer", "image has the associated layers \\n{layers}\", UserWarning) return None def calculate_background(self): ''' Expects", "on fileserver. get_image_scaling(): Returns dimensions normalized scales in array with shape (z,y,x) or", "from app.api.classes.result_measurement import (DbResultMeasurement, IntResultMeasurement) from app.api.classes_com import ComImage from app.api.dependencies import check_sess", "A class to handle calculations and other internal operations with images. Attributes ----------", "== {}: clf_dict[\"No classifers found\"] = None return clf_dict def refresh_from_db(self): ''' Requests", "description of the object has_bg_layer: bool = False indicator if image has an", "and returns dictionary of format {\"UID_NAME\": UID} refresh_from_db(): Fetches data of this image", "crud.read_image_by_uid( self.uid, sess, for_refresh=True) return updated_db_image def update_hint(self, new_hint: str, sess=None): ''' This", "List of strings to be saved as channel names. ''' metadata = fsr.load_json(self.path_metadata)", "layer_uid: int, sess=None): ''' Sets images bg_layer_id property to given value. Parameters: -", "Returns list of length n_channel with mean intensity of measured pixels. 
''' if", "# Fetches dict in form {name: id} clf_dict = crud.read_classifier_dict_by_type(clf_type) if clf_dict ==", "bool = False bg_layer_id: Optional[int] path_metadata: Optional[Path] path_image: Optional[Path] image_result_layers: List[DbImageResultLayer] = []", "layer.delete() self.refresh_from_db() def estimate_ground_truth_layer(self, layer_id_list: List[int], suffix: str = None): ''' Method to", "image in database which generates path and id. ''' if self.uid == -1:", "= updated_info.has_bg_layer self.bg_layer_id = updated_info.bg_layer_id def delete_result_layer(self, layer_id: int): ''' Method to delete", "image if \"uid\" == -2. Creates image in database which generates path and", "Helper function which returns path to the thumbnail on fileserver. Gets gets fileserver", "Returns obect as com class. ''' kwargs = self.dict() kwargs[\"metadata\"] = utils_import.load_metadata_only( self.path_metadata)", "''' Method to initialize the object. Handles image as new image if \"uid\"", "layer_id_list] ground_truth_estimation_array = utils_results.staple_gte( label_array_list) hint = f\"Following Label Layers were used to", "and save result as ResultMeasurement. Creates measurement object and initializes it (save to", "kwargs[\"tags\"] = list(self.tags) return ComImage(**kwargs) def set_bg_false(self, sess=None): ''' Sets imagaes has_bg_layer property", "layer with id {uid}.\\nThis image has the associated layers \\n{layers}\", UserWarning) return None", "returns dictionary of format {\"UID_NAME\": UID} refresh_from_db(): Fetches data of this image from", "and file storage. Uses default session if none is passed. 
''' uid: int", "Mistos image with id {self.uid}\") fsr.save_zarr(self.data, db_image.path_image) fsr.save_json(self.metadata, db_image.path_metadata) path_xml = utils_paths.make_metadata_xml_path_from_json_path( db_image.path_metadata)", "result_layer in self.image_result_layers] db_result_measurements = [measurement.to_db_class() for measurement in self.result_measurements] db_image = DbImage(", "path: Path): mask = utils_import.read_roi(path, self.data.shape) int_result_layer = IntImageResultLayer( uid=-1, name=f\"{path.name}\", hint=\"imported maks\",", "= set() data: Any metadata_omexml: Any has_bg_layer: bool = False bg_layer_id: Optional[int] def", "napari viewer. Parameters: - clf_type(str): Valid classifier type, for available types see app.api.cfg_classes.classifier_types.", "operations with images. Attributes ---------- uid : int the objects unique identifier name", "saves it to db and file storage. get_classifiers(clf_type: str) -> dict: Fetches all", "is selected as background layer, the attributes \"has_bg_layer\" and \"bg_layer_id\" are set to", "labels > 0 will be unified and represented as foreground (==1) for calculation.", "Sets images bg_layer_id property to given value. Parameters: - layer_uid(int): uid of result", "used for ground truth estimation. Must belong to this image. 
- suffix(str): will", "IntImageResultLayer( uid=-1, name=f\"{path.name}\", hint=\"imported mask\", image_id=self.uid, layer_type=\"labels\", data=_mask ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) else:", "id {self.uid}\") fsr.save_zarr(self.data, db_image.path_image) fsr.save_json(self.metadata, db_image.path_metadata) path_xml = utils_paths.make_metadata_xml_path_from_json_path( db_image.path_metadata) # metadata_string =", "db_image object to delete all associated files and db entries Parameters: - sess(sqlalchemy.orm.Session):", "suffix label_array_list = [crud.read_result_layer_by_uid( layer_id).to_int_class().data for layer_id in layer_id_list] ground_truth_estimation_array = utils_results.staple_gte( label_array_list)", "new channel names as list of strings. opens metadata.json and edits custom_channel_names Parameters:", "dimensions normalized scales in array with shape (z,y,x) or None if no scaling", "index. Returns deep copy of channel with shape (z,y,x). Parameters: - channel(int): index", "= self.to_db_class() db_image.create_in_db() self.uid = db_image.uid original_filename = self.metadata[\"original_filename\"] self.metadata = self.metadata[\"images\"][self.series_index] self.metadata[\"original_filename\"]", "foreground (==1) for calculation. Saves label layer to image, database and file storage.", "= IntImageResultLayer( uid=-1, name=f\"ground_truth_estimation{suffix}\", hint=hint, image_id=self.uid, layer_type=\"labels\", data=ground_truth_estimation_array ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) def", "of format {\"UID_NAME\": UID} refresh_from_db(): Fetches data of this image from db and", "Parameters: - sess(sqlalchemy.orm.Session): The database session to be used, if no session is", "refresh_from_db() -> DbImage Fetches image from database and returns DbImage object. update_hint(new_hint: str,", "False in db. 
set_bg_true(layer_uid: int, sess = None) sets \"has_bg_layer\" property to True", "updated_db_image def update_hint(self, new_hint: str, sess=None): ''' This function expects a new hint", "not select layer with id {uid}.\\nThis image has the associated layers \\n{layers}\", UserWarning)", "the attributes \"has_bg_layer\" and \"bg_layer_id\" are set to False and None. Parameters: -", "app.api.classes.image_result_layer import (DbImageResultLayer, IntImageResultLayer) from app.api.classes.result_measurement import (DbResultMeasurement, IntResultMeasurement) from app.api.classes_com import ComImage", "most numpy operations work, some may cause trouble. metadata_omexml : Any original metadata", "path_xml) # save thumbnail thumbnail = utils_import.generate_thumbnail(self.data) thumbnail_path = self.get_thumbnail_path() fsr.save_thumbnail(thumbnail, thumbnail_path) def", "new_hint, sess) def update_channel_names(self, channel_names: List[str]): ''' This function expects a new channel", "import numpy as np from app import crud from app import fileserver_requests as", "if background layer is defined and zeros if no background layer is defined.", "else: warnings.warn( f\"IntImage.select channel could not select layer with id {uid}.\\nThis image has", "Path from typing import Any, List, Optional, Set import numpy as np from", "list(self.tags) return ComImage(**kwargs) def set_bg_false(self, sess=None): ''' Sets imagaes has_bg_layer property to False", "easy use within mistos. As created by app.api.utils_import.acquire_metadata_dict (creates metadata dict for whole", "updates the object's attributes accordingly. Does not reload image data again ''' db_image", "= self.metadata['pixel_size_z'] if n_z > 1: dims = np.array([z, y, x]) dims =", "expects a new channel names as list of strings. opens metadata.json and edits", "if \"uid\" == -1 and as imported Mistos image if \"uid\" == -2.", "object in database and file storage. 
Uses default session if none is passed.", "crud.update_image_bg_true(self.uid, layer_uid, sess) def create_in_db(self, sess=None): ''' Creates object in db. Paths and", "returns path to the thumbnail on fileserver. get_image_scaling(): Returns dimensions normalized scales in", "background layer. path_metadata: pathlib.Path, optional path to the images metadata \".json\". Automatically generated", "= utils_import.read_roi(path, self.data.shape) int_result_layer = IntImageResultLayer( uid=-1, name=f\"{path.name}\", hint=\"imported maks\", image_id=self.uid, layer_type=\"labels\", data=mask", "by uid. If result layer is selected as background layer, the attributes \"has_bg_layer\"", "with images. Attributes ---------- uid : int the objects unique identifier name :", "List: Returns list of length n_channels. List holds the mean pixel values for", "kwargs = self.dict() kwargs[\"metadata\"] = utils_import.load_metadata_only( self.path_metadata) kwargs[\"imageResultLayers\"] = [image_result_layer.to_com_class() for image_result_layer in", "def set_bg_false(self): ''' Helper function to set has_bg_layer to False and bg_layer_id to", "= None) sets \"has_bg_layer\" property to True in db. sets bg_layer_id to given", "layer to be deleted. ''' layer = self.select_result_layer(layer_id) if layer_id == self.bg_layer_id: self.set_bg_false()", "information was provided in metadata. ''' x = self.metadata['pixel_size_physical_x'] y = self.metadata['pixel_size_physical_y'] z", "use in napari viewer. Parameters: - clf_type(str): Valid classifier type, for available types", "name=f\"{path.name}\", hint=\"imported mask\", image_id=self.uid, layer_type=\"labels\", data=mask ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) elif mask.shape ==", "deletes object in database and file storage. Uses default session if none is", "into IntImage.on_init(). 
Thereafter, only image's metadata will be saved to .json and loaded.", "session will be used (app.api.dependencies.get_db). ''' sess = check_sess(sess) crud.update_image_bg_true(self.uid, layer_uid, sess) def", "int name: str series_index: int metadata: dict hint: Optional[str] = \"\" experiment_ids: List[int]", "= None print(\"Couldn't calculate scaling from metadata, defaulting to None\") return dims def", "uid] if len(layers) > 0: return layers[0] else: warnings.warn( f\"IntImage.select channel could not", "if none is passed. update_channel_names(channel_names: List[str]) edits \"custom_channel_names\" attribute of image in it's", "passed db_image object to delete all associated files and db entries Parameters: -", "add_layer_from_roi(path) add_layer_from_mask(path) ''' uid: int name: str series_index: int metadata: dict hint: Optional[str]", "int) -> np.array: Helper method expects channel index. Returns deep copy of channel", "optional None if no bg_layer selected, otherwise it holds the bg_layer_id Methods -------", "object for given result layer and saves it to db and file storage.", "uid=-1, name=f\"{path.name}\", hint=\"imported maks\", image_id=self.uid, layer_type=\"labels\", data=mask ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) def add_layer_from_mask(self,", "int the objects unique identifier series_index : int index of image if multiple", "and returns DbImage object. update_hint(new_hint: str, sess = None): updates objects hint in", "[] tags: Set[str] = set() data: Any metadata_omexml: Any has_bg_layer: bool = False", "used as background layer. 
- sess(sqlalchemy.orm.Session): The database session to be used, if", "and db entries Parameters: - sess(sqlalchemy.orm.Session): The database session to be used, if", "db_image.uid print(f\"Importing archived Mistos image with id {self.uid}\") fsr.save_zarr(self.data, db_image.path_image) fsr.save_json(self.metadata, db_image.path_metadata) path_xml", "metadata_omexml_path = utils_paths.make_metadata_xml_path_from_json_path( self.path_metadata) kwargs[\"metadata_omexml\"] = fsr.load_metadata_xml(metadata_omexml_path) del kwargs[\"path_metadata\"] del kwargs[\"path_image\"] kwargs[\"metadata\"] =", "Parameters: - for_refresh(bool = False): If True, image array is not reloaded from", "layer is defined and zeros if no background layer is defined. measure_mask_in_image(layer_id: int)", "int) -> app.api.classes_int.IntMeasurementResult: Returns measurement object for given result layer and saves it", "(1, image_shape[-2], image_shape[-1]): _mask = np.zeros((image_shape[0], image_shape[-2], image_shape[-1])) _mask[:,...] = mask int_result_layer =", "Refreshes object image from db. Parameters: - sess(sqlalchemy.orm.Session): The database session to be", "file storage. get_classifiers(clf_type: str) -> dict: Fetches all saved classifiers from db, filters", "sess) def set_bg_true(self, layer_uid: int, sess=None): ''' Sets images bg_layer_id property to given", "selected as background layer, the attributes \"has_bg_layer\" and \"bg_layer_id\" are set to False", "= self.bg_layer_id kwargs[\"tags\"] = list(self.tags) return ComImage(**kwargs) def set_bg_false(self, sess=None): ''' Sets imagaes", "[] emtpy list by default. List of all associated DbResultMeasurement objects tags: Set[str]", "estimate ground truth. Resulting layer will be initialized as IntResultLayer. add_layer_from_roi(path) add_layer_from_mask(path) '''", "IntImageResultLayer objects. result_measurements : List[IntResultMeasurement] empty list by default. 
List of all associated", "updates objects hint in database. Uses default session if none is passed. update_channel_names(channel_names:", "updated_info.bg_layer_id def delete_result_layer(self, layer_id: int): ''' Method to delete a result layer by", "n_z = self.metadata['pixel_size_z'] if n_z > 1: dims = np.array([z, y, x]) dims", "\"has_bg_layer\" and \"bg_layer_id\" are set to False and None. Parameters: - layer_id(int): Id", "imported Mistos image if \"uid\" == -2. Creates image in database which generates", "is passed default session will be used (app.api.dependencies.get_db) ''' sess = check_sess(sess) crud.delete_image(self,", "layer_uid, sess) def create_in_db(self, sess=None): ''' Creates object in db. Paths and id", "fsr.save_zarr(self.data, db_image.path_image) fsr.save_json(self.metadata, db_image.path_metadata) path_xml = utils_paths.make_metadata_xml_path_from_json_path( db_image.path_metadata) # metadata_string = self.metadata_omexml.to_xml(encoding=\"utf-8\") metadata_omexml", "scaling from metadata, defaulting to None\") return dims def to_db_class(self): ''' Transforms internal", "fsr.save_json(self.metadata, db_image.path_metadata) # save metadata xml path_xml = utils_paths.make_metadata_xml_path_from_json_path( db_image.path_metadata) metadata_string = self.metadata_omexml.to_xml(encoding=\"utf-8\")", "turned to binary, assuming all labels > 0 to be background. Returns list", "db_image_result_layers = [result_layer.to_db_class() for result_layer in self.image_result_layers] db_result_measurements = [measurement.to_db_class() for measurement in", "= [] emtpy list by default. List of all associated DbResultMeasurement objects tags:", "assuming all labels > 0 to be background. Returns list of length n_channel", "to update the image hint. Parameters: - new_hint(str): string to be saved. -", "set layer as background layer. 
Parameters: - image_layer(app.api.classes_int.IntImageImageResultLayer): Layer to be selected as", "== uid] if len(layers) > 0: return layers[0] else: warnings.warn( f\"IntImage.select channel could", "each channel if background layer is defined and zeros if no background layer", "if len(layers) > 0: return layers[0] else: warnings.warn( f\"IntImage.select channel could not select", "background_layer, corresponding attributes are reset. estimate_ground_truth_layer(layer_id_list: List[int], suffix: str): Fetches given layers and", "np.zeros((image_shape[0], image_shape[-2], image_shape[-1])) _mask[:,...] = mask int_result_layer = IntImageResultLayer( uid=-1, name=f\"{path.name}\", hint=\"imported mask\",", "were imported in a single file name : str the objects name hint", "from db. ''' db_image = self.to_db_class() db_image.set_bg_false() self.refresh_from_db() def set_bg_true(self, image_layer: IntImageResultLayer): '''", "metadata will be saved to .json and loaded. hint : str, optional empty", "on_init(): Initializes object. Object is saved in database and file storage get_thumbnail_path(): Helper", "the background layer. path_metadata: pathlib.Path, optional path to the images metadata \".json\". Automatically", "maks\", image_id=self.uid, layer_type=\"labels\", data=mask ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) def add_layer_from_mask(self, path: Path): mask", "could not be read!\") return None else: image_shape = self.data.shape if mask.shape ==", "== -2. Creates image in database which generates path and id. ''' if", "image_shape = self.data.shape if mask.shape == (image_shape[0], image_shape[-2], image_shape[-1]): int_result_layer = IntImageResultLayer( uid=-1,", "other internal operations with images. Attributes ---------- uid : int the objects unique", "was provided in metadata. 
''' x = self.metadata['pixel_size_physical_x'] y = self.metadata['pixel_size_physical_y'] z =", "id {uid}.\\nThis image has the associated layers \\n{layers}\", UserWarning) return None def calculate_background(self):", "kwargs[\"data\"] = data else: kwargs[\"data\"] = None metadata = fsr.load_json(self.path_metadata) metadata_omexml_path = utils_paths.make_metadata_xml_path_from_json_path(", "0) _mean = selection.sum()/n_pixel mean_pixel.append(_mean) return mean_pixel def measure_mask_in_image(self, layer_id: int): ''' Method", "Resulting layer will be initialized as IntResultLayer. add_layer_from_roi(path) add_layer_from_mask(path) ''' uid: int name:", "copy of channel with shape (z,y,x). Parameters: - channel(int): index of channel to", "def update_hint(self, new_hint: str, sess=None): ''' This function expects a new hint as", "kwargs[\"path_image\"] kwargs[\"metadata\"] = metadata kwargs[\"image_result_layers\"] = [image_result_layer.to_int_class() for image_result_layer in self.image_result_layers] kwargs[\"result_measurements\"] =", "creates object in database, updates objects path and uid attributes accordingly. Uses default", "is not found in self.image_result_layers calculate_background() -> List: Returns list of length n_channels.", "image from db and updates the objects attributes. 
delete_result_layer(layer_id: int): Deletes the layer", "save metadata xml path_xml = utils_paths.make_metadata_xml_path_from_json_path( db_image.path_metadata) metadata_string = self.metadata_omexml.to_xml(encoding=\"utf-8\") metadata_string = xml.dom.minidom.parseString(", "''' db_image = self.to_db_class() updated_info = db_image.refresh_from_db() self.name = updated_info.name self.hint = updated_info.hint", "pathlib.Path ''' return utils_paths.fileserver.joinpath(utils_paths.make_thumbnail_path(self.uid)) def get_image_scaling(self): ''' Reads pixel dimensions and returns relative", "hint=\"\", image_id=self.uid, result_layer_id=layer.uid, measurement=measurement, measurement_summary=measurement_summary ) measurement_result.on_init() self.refresh_from_db() return measurement_result def get_classifiers(self, clf_type:", "= self.select_result_layer(layer_id) if layer_id == self.bg_layer_id: self.set_bg_false() layer.delete() self.refresh_from_db() def estimate_ground_truth_layer(self, layer_id_list: List[int],", "sess = None): updates objects hint in database. Uses default session if none", "str, sess = None): updates objects hint in database. Uses default session if", "Parameters: - channel(int): index of channel to be selected. ''' channel_data = copy.deepcopy(self.data[:,", "dims = np.array([z, y, x]) dims = dims/dims.max() elif n_z == 1: dims", "class to handle database and file storage of Images Attributes ---------- uid :", "accordingly. Uses default session if none is passed. refresh_from_db() -> DbImage Fetches image", "be used as background layer. - sess(sqlalchemy.orm.Session): The database session to be used,", "as background layer. Parameters: - image_layer(app.api.classes_int.IntImageImageResultLayer): Layer to be selected as background layer.", "all classifiers of given type from database. 
Returns dictionary of format {\"UID_NAME\": UID}.", "channel if background layer is defined and zeros if no background layer is", "description of the object experiment_ids: List[int] empty list by default. List of experiments_group", "will be binarized, all labels > 0 will be unified and represented as", "background layer is defined and zeros if no background layer is defined. measure_mask_in_image(layer_id:", "the ground truth: {layer_id_list}\" int_result_layer = IntImageResultLayer( uid=-1, name=f\"ground_truth_estimation{suffix}\", hint=hint, image_id=self.uid, layer_type=\"labels\", data=ground_truth_estimation_array", "class. ''' kwargs = self.dict() kwargs[\"metadata\"] = utils_import.load_metadata_only( self.path_metadata) kwargs[\"imageResultLayers\"] = [image_result_layer.to_com_class() for", "import when image was imported has_bg_layer : bool False by default. Indicates if", "edits \"custom_channel_names\" attribute of image in it's metadata.json delete_from_system(sess = None): deletes object", "''' A class to handle calculations and other internal operations with images. Attributes", "get_classifiers(self, clf_type: str): ''' Loads all classifiers of given type from database. Returns", "object and initializes it (save to db and file storage) Returns IntResultMeasurement object:", "''' metadata = fsr.load_json(self.path_metadata) metadata[\"custom_channel_names\"] = channel_names fsr.save_metadata(metadata, self.path_metadata) def delete_from_system(self, sess=None): '''", "= self.data.shape[1] mean_pixel = [] for n in range(n_channel): channel_data = self.select_channel(n) selection", "bg_mask.sum() n_channel = self.data.shape[1] mean_pixel = [] for n in range(n_channel): channel_data =", "path to the thumbnail on fileserver. Gets gets fileserver path and joins with", "Set of keywords to work with in the frontend data : Any ___TO", "to_com_class()->app.api.classes_com.ComImage: returns object as com_class. 
set_bg_false(sess = None) sets \"has_bg_layer\" property to False", "Returns IntResultMeasurement object: measurement.measurement has shape: (n_labels, n_channel, n_features), n_features == 2 (n_pixels,", "metadata dict for whole series) Series metadatadict is passed into IntImage.on_init(). Thereafter, only", "attributes \"has_bg_layer\" and \"bg_layer_id\" are set to False and None. Parameters: - layer_id(int):", "''' Loads all classifiers of given type from database. Returns dictionary of format", "be saved. - sess(sqlalchemy.orm.Session): The database session to be used, if no session", "session is passed default session will be used (app.api.dependencies.get_db) ''' sess = check_sess(sess)", "crud.create_image(self, sess) self.uid = sql_image.id self.path_image = Path(sql_image.path_image) self.path_metadata = Path(sql_image.path_metadata) def refresh_from_db(self,", "print(f\"Importing archived Mistos image with id {self.uid}\") fsr.save_zarr(self.data, db_image.path_image) fsr.save_json(self.metadata, db_image.path_metadata) path_xml =", "uid=-1, name=utils_paths.make_measurement_name(self.name, layer.name), hint=\"\", image_id=self.uid, result_layer_id=layer.uid, measurement=measurement, measurement_summary=measurement_summary ) measurement_result.on_init() self.refresh_from_db() return measurement_result", "def add_layer_from_roi(self, path: Path): mask = utils_import.read_roi(path, self.data.shape) int_result_layer = IntImageResultLayer( uid=-1, name=f\"{path.name}\",", "the mean pixel values for each channel if background layer is defined and", "are imported via one file (image series), the index of the image is", "''' A class to handle database and file storage of Images Attributes ----------", "to .json and loaded. hint : str, optional empty string by default. 
brief", "scales in array with shape (z,y,x) or None if no scaling information was", "for image_result_layer in self.image_result_layers] kwargs[\"result_measurements\"] = [measurement.to_int_class() for measurement in self.measurements] return IntImage(**kwargs)", "uid=self.uid, series_index=self.series_index, name=self.name, hint=self.hint, path_metadata=None, path_image=None, has_bg_layer=self.has_bg_layer, bg_layer_id=self.bg_layer_id, experiment_ids=self.experiment_ids, image_result_layers=db_image_result_layers, result_measurements=db_result_measurements, tags=self.tags )", "background layer. - sess(sqlalchemy.orm.Session): The database session to be used, if no session", "be used, if no session is passed default session will be used (app.api.dependencies.get_db).", "= [] image_result_layers: List[IntImageResultLayer] = [] result_measurements: List[IntResultMeasurement] = [] tags: Set[str] =", "Optional[Path] path_image: Optional[Path] image_result_layers: List[DbImageResultLayer] = [] measurements: List[DbResultMeasurement] = [] tags: Set[str]", "bg_layer_id to None. Attribute is changed in db, then object attributes are reloaded", "is changed in db, then object attributes are reloaded from db. ''' db_image", "holds the mean pixel values for each channel if background layer is defined", "bg_mask.max() < 2: bg_mask = np.where(bg_mask > 0, 1, 0) n_pixel = bg_mask.sum()", "utils_paths.make_metadata_xml_path_from_json_path( self.path_metadata) kwargs[\"metadata_omexml\"] = fsr.load_metadata_xml(metadata_omexml_path) del kwargs[\"path_metadata\"] del kwargs[\"path_image\"] kwargs[\"metadata\"] = metadata kwargs[\"image_result_layers\"]", "storage. Uses default session if none is passed. 
''' uid: int series_index: int", "Handles image as new image if \"uid\" == -1 and as imported Mistos", "1, 0) n_pixel = bg_mask.sum() n_channel = self.data.shape[1] mean_pixel = [] for n", "measure_mask_in_image(self, layer_id: int): ''' Method to measure mask and save result as ResultMeasurement.", "layer_uid(int): uid of result layer to be used as background layer. - sess(sqlalchemy.orm.Session):", "in database and file storage. Uses default session if none is passed. '''", "from pathlib import Path from typing import Any, List, Optional, Set import numpy", "of measured pixels. ''' if self.has_bg_layer: bg_uid = self.bg_layer_id bg_layer = self.select_result_layer(bg_uid) bg_mask", "layer = self.select_result_layer(layer_id) if layer_id == self.bg_layer_id: self.set_bg_false() layer.delete() self.refresh_from_db() def estimate_ground_truth_layer(self, layer_id_list:", "is saved to database. image_result_layers: List[DbImageResultLayer] = [] emtpy list by default. List", "= self.select_result_layer(bg_uid) bg_mask = bg_layer.data else: bg_mask = np.zeros(( self.data.shape[0], self.data.shape[2], self.data.shape[3] ))", "set by default. Set of keywords to work with in the frontend data", "kwargs = self.dict() # Only load the full image if not already loaded", "a single file name : str the objects name hint : str =", "all associated DbImageResultLayer objects measurements: List[DbResultMeasurement] = [] emtpy list by default. List", "= self.bg_layer_id bg_layer = self.select_result_layer(bg_uid) bg_mask = bg_layer.data else: bg_mask = np.zeros(( self.data.shape[0],", "array with shape (z,y,x) or None. to_db_class() -> app.api.classes_db.DbImage: Returns object as DbImage", "dimensions. 
Returns dimensions normalized scales in array with shape (z,y,x) or None if", "for layer_id in layer_id_list] ground_truth_estimation_array = utils_results.staple_gte( label_array_list) hint = f\"Following Label Layers", "in self.image_result_layers calculate_background() -> List: Returns list of length n_channels. List holds the", "dict for whole series) Series metadatadict is passed into IntImage.on_init(). Thereafter, only image's", "None if no bg_layer selected, otherwise it holds the bg_layer_id Methods ------- on_init():", "delete all associated files and db entries Parameters: - sess(sqlalchemy.orm.Session): The database session", "fileserver_requests as fsr from app.api import utils_import, utils_paths, utils_results from app.api.classes.image_result_layer import (DbImageResultLayer,", "in self.image_result_layers if _.uid == uid] if len(layers) > 0: return layers[0] else:", "and as imported Mistos image if \"uid\" == -2. Creates image in database", "None print(\"Couldn't calculate scaling from metadata, defaulting to None\") return dims def to_db_class(self):", "self.uid == -1: db_image = self.to_db_class() db_image.create_in_db() self.uid = db_image.uid original_filename = self.metadata[\"original_filename\"]", "if multiple images were imported in a single file name : str the", "identifier name : str the objects name series_index : int if multiple images", "and id are generated and updated in object. Parameters: - sess(sqlalchemy.orm.Session): The database", "process. to_com_class()->app.api.classes_com.ComImage: returns object as com_class. 
set_bg_false(sess = None) sets \"has_bg_layer\" property to", "xml data as read by bioformats import when image was imported has_bg_layer :", "IntResultMeasurement( uid=-1, name=utils_paths.make_measurement_name(self.name, layer.name), hint=\"\", image_id=self.uid, result_layer_id=layer.uid, measurement=measurement, measurement_summary=measurement_summary ) measurement_result.on_init() self.refresh_from_db() return", "be saved to .json and loaded. hint : str, optional empty string by", "import ComImage from app.api.dependencies import check_sess from pydantic import BaseModel, constr from pathlib", "db_image = self.to_db_class() db_image.set_bg_false() self.refresh_from_db() def set_bg_true(self, image_layer: IntImageResultLayer): ''' Method to set", "file storage get_thumbnail_path(): Helper function which returns path to the thumbnail on fileserver.", "List of experiments_group ids which use the image. image_result_layers : List[IntImageResultLayer] empty list", "self.metadata['pixel_size_physical_x'] y = self.metadata['pixel_size_physical_y'] z = self.metadata['pixel_size_physical_z'] n_z = self.metadata['pixel_size_z'] if n_z >", "by default. brief description of the object has_bg_layer: bool = False indicator if", "array of shape (z,c,y,x) in which the image is stored. Is loaded from", "uid of result layer to be used as background layer. - sess(sqlalchemy.orm.Session): The", "np.array: Helper method expects channel index. Returns deep copy of channel with shape", "names as list of strings. opens metadata.json and edits custom_channel_names Parameters: - channel_names(List[str]):", "path to the thumbnail on fileserver. 
get_image_scaling(): Returns dimensions normalized scales in array", "dims = np.array([y, x]) dims = dims/dims.max() else: dims = None print(\"Couldn't calculate", "for image_result_layer in self.image_result_layers] kwargs[\"measurements\"] = [measurement.to_com_class() for measurement in self.measurements] kwargs[\"seriesIndex\"] =", "image from db. Parameters: - sess(sqlalchemy.orm.Session): The database session to be used, if", "updated_info.name self.hint = updated_info.hint self.experiment_ids = updated_info.experiment_ids self.image_result_layers = updated_info.image_result_layers self.result_measurements = updated_info.result_measurements", "identifier series_index : int index of image if multiple images were imported in", "stored. Is loaded from .zarr files, most numpy operations work, some may cause", "Parameters: - clf_type(str): Valid classifier type, for available types see app.api.cfg_classes.classifier_types. ''' #", "layer_type=\"labels\", data=mask ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) elif mask.shape == (1, image_shape[-2], image_shape[-1]): _mask", "------- to_int_class()->app.api.classes_internal.IntImage: returns object as int_class. Loads layer array from file path in", "bg_layer = self.select_result_layer(bg_uid) bg_mask = bg_layer.data else: bg_mask = np.zeros(( self.data.shape[0], self.data.shape[2], self.data.shape[3]", "Expects the bg_uid to belong to a result layer of this image. 
Result", "utils_paths.make_metadata_xml_path_from_json_path( db_image.path_metadata) metadata_string = self.metadata_omexml.to_xml(encoding=\"utf-8\") metadata_string = xml.dom.minidom.parseString( metadata_string).toprettyxml(indent=\"\\t\") fsr.save_metadata_xml(metadata_string, path_xml) elif self.uid", "kwargs[\"metadata_omexml\"] = fsr.load_metadata_xml(metadata_omexml_path) del kwargs[\"path_metadata\"] del kwargs[\"path_image\"] kwargs[\"metadata\"] = metadata kwargs[\"image_result_layers\"] = [image_result_layer.to_int_class()", "= channel_names fsr.save_metadata(metadata, self.path_metadata) def delete_from_system(self, sess=None): ''' calls crud.delete_image and passed db_image", "it holds the bg_layer_id Methods ------- on_init(): Initializes object. Object is saved in", "tags : Set[str]: empty set by default. Set of keywords to work with", "= [measurement.to_db_class() for measurement in self.result_measurements] db_image = DbImage( uid=self.uid, series_index=self.series_index, name=self.name, hint=self.hint,", "type(mask) == type(None): warnings.warn(\"Image could not be read!\") return None else: image_shape =", "''' sess = check_sess(sess) crud.update_image_hint(self.uid, new_hint, sess) def update_channel_names(self, channel_names: List[str]): ''' This", "session if none is passed. refresh_from_db() -> DbImage Fetches image from database and", "default session will be used (app.api.dependencies.get_db). ''' sess = check_sess(sess) crud.update_image_bg_false(self.uid, sess) def", "= IntResultMeasurement( uid=-1, name=utils_paths.make_measurement_name(self.name, layer.name), hint=\"\", image_id=self.uid, result_layer_id=layer.uid, measurement=measurement, measurement_summary=measurement_summary ) measurement_result.on_init() self.refresh_from_db()", "name: str hint: str = \"\" has_bg_layer: bool = False bg_layer_id: Optional[int] path_metadata:", "from multiple layers with by SimpleITK's STAPLE probabilities. 
For ground truth estimation layer", "def set_bg_true(self, image_layer: IntImageResultLayer): ''' Method to set layer as background layer. Parameters:", "= dims/dims.max() else: dims = None print(\"Couldn't calculate scaling from metadata, defaulting to", "to binary, assuming all labels > 0 to be background. Returns list of", "> 0 to be background. Returns list of length n_channel with mean intensity", "string by default. brief description of the object has_bg_layer: bool = False indicator", "app.api.classes_db.DbImage: Returns object as DbImage object. set_bg_false(): Helper function to set has_bg_layer to", "normalized scales in array with shape (z,y,x) or None. to_db_class() -> app.api.classes_db.DbImage: Returns", "measurement_result def get_classifiers(self, clf_type: str): ''' Loads all classifiers of given type from", "db class representation. ''' db_image_result_layers = [result_layer.to_db_class() for result_layer in self.image_result_layers] db_result_measurements =", "measurement_summary = utils_results.calculate_measurement( image_array, layer.data) measurement_result = IntResultMeasurement( uid=-1, name=utils_paths.make_measurement_name(self.name, layer.name), hint=\"\", image_id=self.uid,", "Fetches image from database and returns DbImage object. update_hint(new_hint: str, sess = None):", "IntImage(**kwargs) def to_com_class(self): ''' Returns obect as com class. ''' kwargs = self.dict()", "id. ''' if self.uid == -1: db_image = self.to_db_class() db_image.create_in_db() self.uid = db_image.uid", "mask.shape == (image_shape[0], image_shape[-2], image_shape[-1]): int_result_layer = IntImageResultLayer( uid=-1, name=f\"{path.name}\", hint=\"imported mask\", image_id=self.uid,", "accordingly. Does not reload image data again ''' db_image = self.to_db_class() updated_info =", "if mask.shape == (image_shape[0], image_shape[-2], image_shape[-1]): int_result_layer = IntImageResultLayer( uid=-1, name=f\"{path.name}\", hint=\"imported mask\",", "none is passed. 
''' uid: int series_index: int name: str hint: str =", "used (app.api.dependencies.get_db) ''' sess = check_sess(sess) updated_db_image = crud.read_image_by_uid( self.uid, sess, for_refresh=True) return", "as pathlib.Path ''' return utils_paths.fileserver.joinpath(utils_paths.make_thumbnail_path(self.uid)) def get_image_scaling(self): ''' Reads pixel dimensions and returns", "Paths and id are generated and updated in object. Parameters: - sess(sqlalchemy.orm.Session): The", "app import fileserver_requests as fsr from app.api import utils_import, utils_paths, utils_results from app.api.classes.image_result_layer", "self.uid = db_image.uid original_filename = self.metadata[\"original_filename\"] self.metadata = self.metadata[\"images\"][self.series_index] self.metadata[\"original_filename\"] = original_filename #", "dictionary of format {\"UID_NAME\": UID}. Mainly for use in napari viewer. Parameters: -", "series) Series metadatadict is passed into IntImage.on_init(). Thereafter, only image's metadata will be", "''' if suffix == None: suffix = \"\" else: suffix = \"_\" +", "None): deletes object in database and file storage. Uses default session if none", "of given type from database. Returns dictionary of format {\"UID_NAME\": UID}. Mainly for", "whole series) Series metadatadict is passed into IntImage.on_init(). Thereafter, only image's metadata will", "---------- uid : int the objects unique identifier name : str the objects", "image if multiple images were imported in a single file name : str", "attributes. delete_result_layer(layer_id: int): Deletes the layer from database, file storage and the image.", "of channel with shape (z,y,x). select_result_layer(uid: int) -> app.api.classes_internal.IntResultLayer | None: Returns layer", "List[int] = [] image_result_layers: List[IntImageResultLayer] = [] result_measurements: List[IntResultMeasurement] = [] tags: Set[str]", "with mean intensity of measured pixels. 
''' if self.has_bg_layer: bg_uid = self.bg_layer_id bg_layer", "categorize objects in frontend. Methods ------- to_int_class()->app.api.classes_internal.IntImage: returns object as int_class. Loads layer", "add_layer_from_mask(path) ''' uid: int name: str series_index: int metadata: dict hint: Optional[str] =", "format {\"UID_NAME\": UID}. Mainly for use in napari viewer. Parameters: - clf_type(str): Valid", "from file path in the process. to_com_class()->app.api.classes_com.ComImage: returns object as com_class. set_bg_false(sess =", "to delete all associated files and db entries Parameters: - sess(sqlalchemy.orm.Session): The database", "[measurement.to_int_class() for measurement in self.measurements] return IntImage(**kwargs) def to_com_class(self): ''' Returns obect as", "from os import name import warnings import xml from pathlib import Path from", "database session to be used, if no session is passed default session will", "path_xml = utils_paths.make_metadata_xml_path_from_json_path( db_image.path_metadata) metadata_string = self.metadata_omexml.to_xml(encoding=\"utf-8\") metadata_string = xml.dom.minidom.parseString( metadata_string).toprettyxml(indent=\"\\t\") fsr.save_metadata_xml(metadata_string, path_xml)", "mistos. As created by app.api.utils_import.acquire_metadata_dict (creates metadata dict for whole series) Series metadatadict", "# save metadata dict fsr.save_json(self.metadata, db_image.path_metadata) # save metadata xml path_xml = utils_paths.make_metadata_xml_path_from_json_path(", "channel index. Returns deep copy of channel with shape (z,y,x). select_result_layer(uid: int) ->", "def get_classifiers(self, clf_type: str): ''' Loads all classifiers of given type from database.", "to this image. - suffix(str): will be appended to layer name. ''' if", "= False bg_layer_id: Optional[int] def on_init(self): ''' Method to initialize the object. Handles", "to be saved as channel names. 
''' metadata = fsr.load_json(self.path_metadata) metadata[\"custom_channel_names\"] = channel_names", "2 (n_pixels, sum_pixels) Parameters: - layer_id(int): uid of the layer to be measured", "updated_info.experiment_ids self.image_result_layers = updated_info.image_result_layers self.result_measurements = updated_info.result_measurements self.tags = updated_info.tags self.has_bg_layer = updated_info.has_bg_layer", "to handle calculations and other internal operations with images. Attributes ---------- uid :", "to False and bg_layer_id to None. Attribute is changed in db, then object", "with id {uid}.\\nThis image has the associated layers \\n{layers}\", UserWarning) return None def", "images. Attributes ---------- uid : int the objects unique identifier name : str", "= self.to_db_class() updated_info = db_image.refresh_from_db() self.name = updated_info.name self.hint = updated_info.hint self.experiment_ids =", "expects a new hint as string and calls crud.update_image_hint to update the image", "self.metadata[\"original_filename\"] self.metadata = self.metadata[\"images\"][self.series_index] self.metadata[\"original_filename\"] = original_filename # save zarr fsr.save_zarr(self.data, db_image.path_image) #", "Returns deep copy of channel with shape (z,y,x). select_result_layer(uid: int) -> app.api.classes_internal.IntResultLayer |", ": int, optional None if no bg_layer selected, otherwise it holds the bg_layer_id", "loaded. hint : str, optional empty string by default. 
brief description of the", "pixel values for each channel if background layer is defined and zeros if", "to be used, if no session is passed default session will be used", "int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) elif mask.shape == (1, image_shape[-2], image_shape[-1]): _mask = np.zeros((image_shape[0], image_shape[-2],", "has shape: (n_labels, n_channel, n_features), n_features == 2 (n_pixels, sum_pixels) Parameters: - layer_id(int):", "hint : str = \"\" empty string by default. brief description of the", "set_bg_false(sess = None) sets \"has_bg_layer\" property to False in db. set_bg_true(layer_uid: int, sess", "(z,y,x). Parameters: - channel(int): index of channel to be selected. ''' channel_data =", "fsr.save_json(self.metadata, db_image.path_metadata) path_xml = utils_paths.make_metadata_xml_path_from_json_path( db_image.path_metadata) # metadata_string = self.metadata_omexml.to_xml(encoding=\"utf-8\") metadata_omexml = self.metadata_omexml.toprettyxml(indent=\"\\t\")", "ids to be used for ground truth estimation. Must belong to this image.", "Method to set layer as background layer. 
Parameters: - image_layer(app.api.classes_int.IntImageImageResultLayer): Layer to be", "of the image is stored here metadata : dict reduced metadata for easy", "multiple images were imported in a single file name : str the objects", "= self.dict() # Only load the full image if not already loaded if", "layer_id(int): uid of the layer to be measured ''' image_array = self.data layer", "= self.metadata_omexml.to_xml(encoding=\"utf-8\") metadata_string = xml.dom.minidom.parseString( metadata_string).toprettyxml(indent=\"\\t\") fsr.save_metadata_xml(metadata_string, path_xml) elif self.uid == -2: db_image", "created by app.api.utils_import.acquire_metadata_dict (creates metadata dict for whole series) Series metadatadict is passed", "hint: str = \"\" has_bg_layer: bool = False bg_layer_id: Optional[int] path_metadata: Optional[Path] path_image:", "z = self.metadata['pixel_size_physical_z'] n_z = self.metadata['pixel_size_z'] if n_z > 1: dims = np.array([z,", "Loads layer array from file path in the process. to_com_class()->app.api.classes_com.ComImage: returns object as", "all associated DbResultMeasurement objects tags: Set[str] = [] set of string keywords to", "mask and save result as ResultMeasurement. Creates measurement object and initializes it (save", "is defined and zeros if no background layer is defined. measure_mask_in_image(layer_id: int) ->", "property to False in database. Parameters: - sess(sqlalchemy.orm.Session): The database session to be", "Gets gets fileserver path and joins with return value from utils_paths.make_thumbnail_path Returns path", "pathlib.Path, optional path to the images metadata \".json\". Automatically generated as image is", "default. List of all associated DbResultMeasurement objects tags: Set[str] = [] set of", "experiment_ids: List[int] empty list by default. List of experiments_group ids which use the", "int): layers = [_ for _ in self.image_result_layers if _.uid == uid] if", "result layer to be used as background layer. 
- sess(sqlalchemy.orm.Session): The database session", ") int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) elif mask.shape == (1, image_shape[-2], image_shape[-1]): _mask = np.zeros((image_shape[0],", "the image. image_result_layers : List[IntImageResultLayer] empty list by default. List of all associated", "utils_import.read_mask(path) if type(mask) == type(None): warnings.warn(\"Image could not be read!\") return None else:", "layer.data) measurement_result = IntResultMeasurement( uid=-1, name=utils_paths.make_measurement_name(self.name, layer.name), hint=\"\", image_id=self.uid, result_layer_id=layer.uid, measurement=measurement, measurement_summary=measurement_summary )", "layer to be measured ''' image_array = self.data layer = self.select_result_layer(layer_id) measurement, measurement_summary", "deep copy of channel with shape (z,y,x). Parameters: - channel(int): index of channel", "2: bg_mask = np.where(bg_mask > 0, 1, 0) n_pixel = bg_mask.sum() n_channel =", "Creates measurement object and initializes it (save to db and file storage) Returns", "Parameters: - channel_names(List[str]): List of strings to be saved as channel names. '''", "intensity of measured pixels. ''' if self.has_bg_layer: bg_uid = self.bg_layer_id bg_layer = self.select_result_layer(bg_uid)", "Parameters: - layer_id(int): uid of the layer to be measured ''' image_array =", "mean_pixel def measure_mask_in_image(self, layer_id: int): ''' Method to measure mask and save result", "''' Sets images bg_layer_id property to given value. 
Parameters: - layer_uid(int): uid of", "image_array = self.data layer = self.select_result_layer(layer_id) measurement, measurement_summary = utils_results.calculate_measurement( image_array, layer.data) measurement_result", "''' kwargs = self.dict() kwargs[\"metadata\"] = utils_import.load_metadata_only( self.path_metadata) kwargs[\"imageResultLayers\"] = [image_result_layer.to_com_class() for image_result_layer", "to set layer as background layer. Parameters: - image_layer(app.api.classes_int.IntImageImageResultLayer): Layer to be selected", "if suffix == None: suffix = \"\" else: suffix = \"_\" + suffix", "path_metadata=None, path_image=None, has_bg_layer=self.has_bg_layer, bg_layer_id=self.bg_layer_id, experiment_ids=self.experiment_ids, image_result_layers=db_image_result_layers, result_measurements=db_result_measurements, tags=self.tags ) return db_image def set_bg_false(self):", "np from app import crud from app import fileserver_requests as fsr from app.api", "no scaling information was provided in metadata. ''' x = self.metadata['pixel_size_physical_x'] y =", "estimate the ground truth: {layer_id_list}\" int_result_layer = IntImageResultLayer( uid=-1, name=f\"ground_truth_estimation{suffix}\", hint=hint, image_id=self.uid, layer_type=\"labels\",", "import name import warnings import xml from pathlib import Path from typing import", "value from utils_paths.make_thumbnail_path Returns path as pathlib.Path ''' return utils_paths.fileserver.joinpath(utils_paths.make_thumbnail_path(self.uid)) def get_image_scaling(self): '''", "default session if none is passed. 
update_channel_names(channel_names: List[str]) edits \"custom_channel_names\" attribute of image", "in self.image_result_layers] kwargs[\"result_measurements\"] = [measurement.to_int_class() for measurement in self.measurements] return IntImage(**kwargs) def to_com_class(self):", "and edits custom_channel_names Parameters: - channel_names(List[str]): List of strings to be saved as", "set has_bg_layer to False and bg_layer_id to None. Attribute is changed in db,", "Fetches data of this image from db and updates the objects attributes. delete_result_layer(layer_id:", "objects hint in database. Uses default session if none is passed. update_channel_names(channel_names: List[str])", "self.refresh_from_db() def set_bg_true(self, image_layer: IntImageResultLayer): ''' Method to set layer as background layer.", "be used (app.api.dependencies.get_db) ''' sess = check_sess(sess) sql_image = crud.create_image(self, sess) self.uid =", "may cause trouble. metadata_omexml : Any original metadata xml data as read by", "Helper function to set has_bg_layer to False and bg_layer_id to None. 
set_bg_true(image_layer: app.api.classes_int.IntImageResultLayer):", "to be measured ''' image_array = self.data layer = self.select_result_layer(layer_id) measurement, measurement_summary =", "self.bg_layer_id: self.set_bg_false() layer.delete() self.refresh_from_db() def estimate_ground_truth_layer(self, layer_id_list: List[int], suffix: str = None): '''", "self.path_metadata) def delete_from_system(self, sess=None): ''' calls crud.delete_image and passed db_image object to delete", "= crud.read_classifier_dict_by_type(clf_type) if clf_dict == {}: clf_dict[\"No classifers found\"] = None return clf_dict", "''' sess = check_sess(sess) sql_image = crud.create_image(self, sess) self.uid = sql_image.id self.path_image =", "''' return utils_paths.fileserver.joinpath(utils_paths.make_thumbnail_path(self.uid)) def get_image_scaling(self): ''' Reads pixel dimensions and returns relative dimensions.", "if no scaling information was provided in metadata. ''' x = self.metadata['pixel_size_physical_x'] y", "name=f\"{path.name}\", hint=\"imported maks\", image_id=self.uid, layer_type=\"labels\", data=mask ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) def add_layer_from_mask(self, path:", "def get_thumbnail_path(self): ''' Helper function which returns path to the thumbnail on fileserver.", "if n_z > 1: dims = np.array([z, y, x]) dims = dims/dims.max() elif", "truth from multiple layers with by SimpleITK's STAPLE probabilities. For ground truth estimation", "be selected. ''' channel_data = copy.deepcopy(self.data[:, channel, ...])[ :, np.newaxis, ...] 
return channel_data", "IntImageResultLayer( uid=-1, name=f\"{path.name}\", hint=\"imported mask\", image_id=self.uid, layer_type=\"labels\", data=mask ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) elif", "measurement_summary=measurement_summary ) measurement_result.on_init() self.refresh_from_db() return measurement_result def get_classifiers(self, clf_type: str): ''' Loads all", "else: dims = None print(\"Couldn't calculate scaling from metadata, defaulting to None\") return", "database. Parameters: - sess(sqlalchemy.orm.Session): The database session to be used, if no session", "image with id {self.uid}\") fsr.save_zarr(self.data, db_image.path_image) fsr.save_json(self.metadata, db_image.path_metadata) path_xml = utils_paths.make_metadata_xml_path_from_json_path( db_image.path_metadata) #", "to_int_class(self, for_refresh=False): ''' Returns object as int class. Parameters: - for_refresh(bool = False):", "IntImageResultLayer): ''' Method to set layer as background layer. Parameters: - image_layer(app.api.classes_int.IntImageImageResultLayer): Layer", "gets fileserver path and joins with return value from utils_paths.make_thumbnail_path Returns path as", "Attributes ---------- uid : int the objects unique identifier series_index : int index", "imagaes has_bg_layer property to False in database. Parameters: - sess(sqlalchemy.orm.Session): The database session", "corresponding attributes are reset. estimate_ground_truth_layer(layer_id_list: List[int], suffix: str): Fetches given layers and uses", "string and calls crud.update_image_hint to update the image hint. Parameters: - new_hint(str): string", "str the objects name hint : str = \"\" empty string by default.", "given result layer and saves it to db and file storage. get_classifiers(clf_type: str)", "channel_names(List[str]): List of strings to be saved as channel names. 
''' metadata =", "Object is saved in database and file storage get_thumbnail_path(): Helper function which returns", "image if \"uid\" == -1 and as imported Mistos image if \"uid\" ==", "= \"\" has_bg_layer: bool = False bg_layer_id: Optional[int] path_metadata: Optional[Path] path_image: Optional[Path] image_result_layers:", "result_measurements=db_result_measurements, tags=self.tags ) return db_image def set_bg_false(self): ''' Helper function to set has_bg_layer", "series_index=self.series_index, name=self.name, hint=self.hint, path_metadata=None, path_image=None, has_bg_layer=self.has_bg_layer, bg_layer_id=self.bg_layer_id, experiment_ids=self.experiment_ids, image_result_layers=db_image_result_layers, result_measurements=db_result_measurements, tags=self.tags ) return", "image_result_layers=db_image_result_layers, result_measurements=db_result_measurements, tags=self.tags ) return db_image def set_bg_false(self): ''' Helper function to set", "shape (z,y,x). select_result_layer(uid: int) -> app.api.classes_internal.IntResultLayer | None: Returns layer with corresponding id,", "mean_pixel.append(_mean) return mean_pixel def measure_mask_in_image(self, layer_id: int): ''' Method to measure mask and", "has_bg_layer: bool = False indicator if image has an associated background layer. bg_layer_id:", "int, optional None if no associated background layer, otherwise id of the background", "and file storage. get_classifiers(clf_type: str) -> dict: Fetches all saved classifiers from db,", "= [] def to_int_class(self, for_refresh=False): ''' Returns object as int class. Parameters: -", "expects channel index. Returns deep copy of channel with shape (z,y,x). Parameters: -", "generated as image is saved to database. image_result_layers: List[DbImageResultLayer] = [] emtpy list", "channel names as list of strings. opens metadata.json and edits custom_channel_names Parameters: -", "selected. ''' channel_data = copy.deepcopy(self.data[:, channel, ...])[ :, np.newaxis, ...] 
return channel_data def", "_mean = selection.sum()/n_pixel mean_pixel.append(_mean) return mean_pixel def measure_mask_in_image(self, layer_id: int): ''' Method to", "of length n_channels. List holds the mean pixel values for each channel if", "saved to database. path_image: pathlib.Path, optional path to the images array \".zarr\" folder.", "or None. to_db_class() -> app.api.classes_db.DbImage: Returns object as DbImage object. set_bg_false(): Helper function", "self.path_metadata) kwargs[\"imageResultLayers\"] = [image_result_layer.to_com_class() for image_result_layer in self.image_result_layers] kwargs[\"measurements\"] = [measurement.to_com_class() for measurement", "crud.delete_image and passed db_image object to delete all associated files and db entries", "layer_type=\"labels\", data=ground_truth_estimation_array ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) def add_layer_from_roi(self, path: Path): mask = utils_import.read_roi(path,", "in array with shape (z,y,x) or None. to_db_class() -> app.api.classes_db.DbImage: Returns object as", "= utils_import.read_mask(path) if type(mask) == type(None): warnings.warn(\"Image could not be read!\") return None", "indicator if image has an associated background layer. bg_layer_id: int, optional None if", "fsr.save_zarr(self.data, db_image.path_image) # save metadata dict fsr.save_json(self.metadata, db_image.path_metadata) # save metadata xml path_xml", "function expects a new channel names as list of strings. opens metadata.json and", "for each channel if background layer is defined and zeros if no background", "to be selected. ''' channel_data = copy.deepcopy(self.data[:, channel, ...])[ :, np.newaxis, ...] return", "in a single file name : str the objects name hint : str", "shape (z,y,x). Parameters: - channel(int): index of channel to be selected. 
''' channel_data", "if self.has_bg_layer: bg_uid = self.bg_layer_id bg_layer = self.select_result_layer(bg_uid) bg_mask = bg_layer.data else: bg_mask", "function which returns path to the thumbnail on fileserver. Gets gets fileserver path", "= updated_info.hint self.experiment_ids = updated_info.experiment_ids self.image_result_layers = updated_info.image_result_layers self.result_measurements = updated_info.result_measurements self.tags =", "[] for n in range(n_channel): channel_data = self.select_channel(n) selection = np.where(bg_mask, channel_data, 0)", "updated_info.result_measurements self.tags = updated_info.tags self.has_bg_layer = updated_info.has_bg_layer self.bg_layer_id = updated_info.bg_layer_id def delete_result_layer(self, layer_id:", "is passed default session will be used (app.api.dependencies.get_db) ''' sess = check_sess(sess) updated_db_image", "see app.api.cfg_classes.classifier_types. ''' # Fetches dict in form {name: id} clf_dict = crud.read_classifier_dict_by_type(clf_type)", "fsr.load_metadata_xml(metadata_omexml_path) del kwargs[\"path_metadata\"] del kwargs[\"path_image\"] kwargs[\"metadata\"] = metadata kwargs[\"image_result_layers\"] = [image_result_layer.to_int_class() for image_result_layer", "BE DONE: add custom field type___ array of shape (z,c,y,x) in which the", "bg_uid to belong to a result layer of this image. Result layer will", "''' Requests current information from db and updates the object's attributes accordingly. Does", "label layer to image, database and file storage. Parameters: - layer_id_list(List[int]): List of", "otherwise it holds the bg_layer_id Methods ------- on_init(): Initializes object. 
Object is saved", "clf_dict = crud.read_classifier_dict_by_type(clf_type) if clf_dict == {}: clf_dict[\"No classifers found\"] = None return", "truth: {layer_id_list}\" int_result_layer = IntImageResultLayer( uid=-1, name=f\"ground_truth_estimation{suffix}\", hint=hint, image_id=self.uid, layer_type=\"labels\", data=ground_truth_estimation_array ) int_result_layer.on_init()", "_.uid == uid] if len(layers) > 0: return layers[0] else: warnings.warn( f\"IntImage.select channel", "estimation. Must belong to this image. - suffix(str): will be appended to layer", "the image is stored. Is loaded from .zarr files, most numpy operations work,", "file storage of Images Attributes ---------- uid : int the objects unique identifier", "None metadata = fsr.load_json(self.path_metadata) metadata_omexml_path = utils_paths.make_metadata_xml_path_from_json_path( self.path_metadata) kwargs[\"metadata_omexml\"] = fsr.load_metadata_xml(metadata_omexml_path) del kwargs[\"path_metadata\"]", "returns relative dimensions. Returns dimensions normalized scales in array with shape (z,y,x) or", "n_channel with mean intensity of measured pixels. ''' if self.has_bg_layer: bg_uid = self.bg_layer_id", "the image. If layer was background_layer, corresponding attributes are reset. estimate_ground_truth_layer(layer_id_list: List[int], suffix:", "[image_result_layer.to_int_class() for image_result_layer in self.image_result_layers] kwargs[\"result_measurements\"] = [measurement.to_int_class() for measurement in self.measurements] return", "x = self.metadata['pixel_size_physical_x'] y = self.metadata['pixel_size_physical_y'] z = self.metadata['pixel_size_physical_z'] n_z = self.metadata['pixel_size_z'] if", "elif self.uid == -2: db_image = self.to_db_class() db_image.create_in_db() self.uid = db_image.uid print(f\"Importing archived", "fileserver. 
Gets gets fileserver path and joins with return value from utils_paths.make_thumbnail_path Returns", "internal class representation to db class representation. ''' db_image_result_layers = [result_layer.to_db_class() for result_layer", "hint as string and calls crud.update_image_hint to update the image hint. Parameters: -", "if image has an associated background layer. bg_layer_id: int, optional None if no", "dims = dims/dims.max() elif n_z == 1: dims = np.array([y, x]) dims =", "np.array([z, y, x]) dims = dims/dims.max() elif n_z == 1: dims = np.array([y,", "from app.api import utils_import, utils_paths, utils_results from app.api.classes.image_result_layer import (DbImageResultLayer, IntImageResultLayer) from app.api.classes.result_measurement", "def to_int_class(self, for_refresh=False): ''' Returns object as int class. Parameters: - for_refresh(bool =", "layers with by SimpleITK's STAPLE probabilities. For ground truth estimation layer will be", "passed default session will be used (app.api.dependencies.get_db) ''' sess = check_sess(sess) sql_image =", "background layer is defined. measure_mask_in_image(layer_id: int) -> app.api.classes_int.IntMeasurementResult: Returns measurement object for given", "default session will be used (app.api.dependencies.get_db). ''' sess = check_sess(sess) crud.update_image_bg_true(self.uid, layer_uid, sess)", "updated_db_image = crud.read_image_by_uid( self.uid, sess, for_refresh=True) return updated_db_image def update_hint(self, new_hint: str, sess=None):", "set_bg_false(): Helper function to set has_bg_layer to False and bg_layer_id to None. set_bg_true(image_layer:", "True in db. sets bg_layer_id to given value. create_in_db(sess = None): creates object", "appended to layer name. ''' if suffix == None: suffix = \"\" else:", "array from file path in the process. to_com_class()->app.api.classes_com.ComImage: returns object as com_class. set_bg_false(sess", "as list of strings. 
opens metadata.json and edits custom_channel_names Parameters: - channel_names(List[str]): List", "via one file (image series), the index of the image is stored here", "Helper method expects channel index. Returns deep copy of channel with shape (z,y,x).", "ResultMeasurement. Creates measurement object and initializes it (save to db and file storage)", "n_features == 2 (n_pixels, sum_pixels) Parameters: - layer_id(int): uid of the layer to", "create_in_db(sess = None): creates object in database, updates objects path and uid attributes", "reset. estimate_ground_truth_layer(layer_id_list: List[int], suffix: str): Fetches given layers and uses SimpleITKs STAPLE algorithm", "could not select layer with id {uid}.\\nThis image has the associated layers \\n{layers}\",", "np.where(bg_mask, channel_data, 0) _mean = selection.sum()/n_pixel mean_pixel.append(_mean) return mean_pixel def measure_mask_in_image(self, layer_id: int):", "mask\", image_id=self.uid, layer_type=\"labels\", data=mask ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) elif mask.shape == (1, image_shape[-2],", "fsr.load_json(self.path_metadata) metadata_omexml_path = utils_paths.make_metadata_xml_path_from_json_path( self.path_metadata) kwargs[\"metadata_omexml\"] = fsr.load_metadata_xml(metadata_omexml_path) del kwargs[\"path_metadata\"] del kwargs[\"path_image\"] kwargs[\"metadata\"]", "< 2: bg_mask = np.where(bg_mask > 0, 1, 0) n_pixel = bg_mask.sum() n_channel", "data = fsr.load_zarr(kwargs[\"path_image\"]) kwargs[\"data\"] = data else: kwargs[\"data\"] = None metadata = fsr.load_json(self.path_metadata)", "metadata_string = self.metadata_omexml.to_xml(encoding=\"utf-8\") metadata_string = xml.dom.minidom.parseString( metadata_string).toprettyxml(indent=\"\\t\") fsr.save_metadata_xml(metadata_string, path_xml) elif self.uid == -2:", "optional path to the images array \".zarr\" folder. Automatically generated as image is", "be appended to layer name. 
''' if suffix == None: suffix = \"\"", "kwargs[\"seriesIndex\"] = self.series_index kwargs[\"hasBgLayer\"] = self.has_bg_layer kwargs[\"bgLayerId\"] = self.bg_layer_id kwargs[\"tags\"] = list(self.tags) return", "binarized, all labels > 0 will be unified and represented as foreground (==1)", "to False in db. set_bg_true(layer_uid: int, sess = None) sets \"has_bg_layer\" property to", "False bg_layer_id: Optional[int] def on_init(self): ''' Method to initialize the object. Handles image", "= None): ''' Method to estimate ground truth from multiple layers with by", "Path): mask = utils_import.read_roi(path, self.data.shape) int_result_layer = IntImageResultLayer( uid=-1, name=f\"{path.name}\", hint=\"imported maks\", image_id=self.uid,", "ComImage from app.api.dependencies import check_sess from pydantic import BaseModel, constr from pathlib import", "uid : int the objects unique identifier name : str the objects name", "function to set has_bg_layer to False and bg_layer_id to None. set_bg_true(image_layer: app.api.classes_int.IntImageResultLayer): Method", "from utils_paths.make_thumbnail_path Returns path as pathlib.Path ''' return utils_paths.fileserver.joinpath(utils_paths.make_thumbnail_path(self.uid)) def get_image_scaling(self): ''' Reads", "is passed default session will be used (app.api.dependencies.get_db) ''' sess = check_sess(sess) sql_image", "data again ''' db_image = self.to_db_class() updated_info = db_image.refresh_from_db() self.name = updated_info.name self.hint", "layer_uid = image_layer.uid db_image = self.to_db_class() db_image.set_bg_true(layer_uid) self.refresh_from_db() def select_channel(self, channel: int): '''", "Method to delete a result layer by uid. If result layer is selected", "estimation layer will be binarized, all labels > 0 will be unified and", "int index of image if multiple images were imported in a single file", "on fileserver. 
Gets gets fileserver path and joins with return value from utils_paths.make_thumbnail_path", "by default. List of all associated DbResultMeasurement objects tags: Set[str] = [] set", "import Any, List, Optional, Set import numpy as np from app import crud", "empty list by default. List of all associated IntResultMeasurement objects tags : Set[str]:", ": dict reduced metadata for easy use within mistos. As created by app.api.utils_import.acquire_metadata_dict", "metadata[\"custom_channel_names\"] = channel_names fsr.save_metadata(metadata, self.path_metadata) def delete_from_system(self, sess=None): ''' calls crud.delete_image and passed", "= fsr.load_zarr(kwargs[\"path_image\"]) kwargs[\"data\"] = data else: kwargs[\"data\"] = None metadata = fsr.load_json(self.path_metadata) metadata_omexml_path", "Any original metadata xml data as read by bioformats import when image was", "sess = check_sess(sess) updated_db_image = crud.read_image_by_uid( self.uid, sess, for_refresh=True) return updated_db_image def update_hint(self,", "file storage. ''' kwargs = self.dict() # Only load the full image if", "mean pixel values for each channel if background layer is defined and zeros", "Attributes ---------- uid : int the objects unique identifier name : str the", "will be appended to layer name. ''' if suffix == None: suffix =", "channel_data, 0) _mean = selection.sum()/n_pixel mean_pixel.append(_mean) return mean_pixel def measure_mask_in_image(self, layer_id: int): '''", "as background layer select_channel(channel: int) -> np.array: Helper method expects channel index. 
Returns", "sess = check_sess(sess) crud.update_image_bg_true(self.uid, layer_uid, sess) def create_in_db(self, sess=None): ''' Creates object in", "kwargs[\"bgLayerId\"] = self.bg_layer_id kwargs[\"tags\"] = list(self.tags) return ComImage(**kwargs) def set_bg_false(self, sess=None): ''' Sets", "app.api.dependencies import check_sess from pydantic import BaseModel, constr from pathlib import Path class", "not reloaded from file storage. ''' kwargs = self.dict() # Only load the", "image in it's metadata.json delete_from_system(sess = None): deletes object in database and file", "channel_data = self.select_channel(n) selection = np.where(bg_mask, channel_data, 0) _mean = selection.sum()/n_pixel mean_pixel.append(_mean) return", "channel with shape (z,y,x). select_result_layer(uid: int) -> app.api.classes_internal.IntResultLayer | None: Returns layer with", "---------- uid : int the objects unique identifier series_index : int index of", "Fetches all saved classifiers from db, filters for type and returns dictionary of", "result_layer_id=layer.uid, measurement=measurement, measurement_summary=measurement_summary ) measurement_result.on_init() self.refresh_from_db() return measurement_result def get_classifiers(self, clf_type: str): '''", "a result layer of this image. Result layer will be turned to binary,", "False by default. Indicates if image as layer selected as background_layer. bg_layer_id :", "as com class. ''' kwargs = self.dict() kwargs[\"metadata\"] = utils_import.load_metadata_only( self.path_metadata) kwargs[\"imageResultLayers\"] =", "class IntImage(BaseModel): ''' A class to handle calculations and other internal operations with", "Set[str]: empty set by default. Set of keywords to work with in the", "as ResultMeasurement. 
Creates measurement object and initializes it (save to db and file", "utils_import, utils_paths, utils_results from app.api.classes.image_result_layer import (DbImageResultLayer, IntImageResultLayer) from app.api.classes.result_measurement import (DbResultMeasurement, IntResultMeasurement)", "Indicates if image as layer selected as background_layer. bg_layer_id : int, optional None", "data=ground_truth_estimation_array ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) def add_layer_from_roi(self, path: Path): mask = utils_import.read_roi(path, self.data.shape)", "to False in database. Parameters: - sess(sqlalchemy.orm.Session): The database session to be used,", "passed default session will be used (app.api.dependencies.get_db). ''' sess = check_sess(sess) crud.update_image_bg_false(self.uid, sess)", "add_layer_from_mask(self, path: Path): mask = utils_import.read_mask(path) if type(mask) == type(None): warnings.warn(\"Image could not", ": str = \"\" empty string by default. brief description of the object", "SimpleITKs STAPLE algorithm to estimate ground truth. Resulting layer will be initialized as", "function expects a new hint as string and calls crud.update_image_hint to update the", "layer was background_layer, corresponding attributes are reset. estimate_ground_truth_layer(layer_id_list: List[int], suffix: str): Fetches given", "''' Transforms internal class representation to db class representation. ''' db_image_result_layers = [result_layer.to_db_class()", "in the process. to_com_class()->app.api.classes_com.ComImage: returns object as com_class. set_bg_false(sess = None) sets \"has_bg_layer\"", "layer_id_list(List[int]): List of layer ids to be used for ground truth estimation. Must", "to handle database and file storage of Images Attributes ---------- uid : int", "add custom field type___ array of shape (z,c,y,x) in which the image is", "by default. 
brief description of the object experiment_ids: List[int] empty list by default.", "layer of this image. Result layer will be turned to binary, assuming all", "Set[str] = [] set of string keywords to easily categorize objects in frontend.", "delete_from_system(sess = None): deletes object in database and file storage. Uses default session", "some may cause trouble. metadata_omexml : Any original metadata xml data as read", "was imported has_bg_layer : bool False by default. Indicates if image as layer", "UserWarning) return None def calculate_background(self): ''' Expects the bg_uid to belong to a", "for easy use within mistos. As created by app.api.utils_import.acquire_metadata_dict (creates metadata dict for", "list by default. List of all associated IntImageResultLayer objects. result_measurements : List[IntResultMeasurement] empty", "return None else: image_shape = self.data.shape if mask.shape == (image_shape[0], image_shape[-2], image_shape[-1]): int_result_layer", "tags=self.tags ) return db_image def set_bg_false(self): ''' Helper function to set has_bg_layer to", "dimensions normalized scales in array with shape (z,y,x) or None. to_db_class() -> app.api.classes_db.DbImage:", "set_bg_false(self, sess=None): ''' Sets imagaes has_bg_layer property to False in database. Parameters: -", "array is not reloaded from file storage. ''' kwargs = self.dict() # Only", "metadata for easy use within mistos. As created by app.api.utils_import.acquire_metadata_dict (creates metadata dict", "import crud from app import fileserver_requests as fsr from app.api import utils_import, utils_paths,", "get_thumbnail_path(self): ''' Helper function which returns path to the thumbnail on fileserver. 
Gets", "update_channel_names(channel_names: List[str]) edits \"custom_channel_names\" attribute of image in it's metadata.json delete_from_system(sess = None):", "sum_pixels) Parameters: - layer_id(int): uid of the layer to be measured ''' image_array", "0: return layers[0] else: warnings.warn( f\"IntImage.select channel could not select layer with id", "shape (z,c,y,x) in which the image is stored. Is loaded from .zarr files,", "''' Reads pixel dimensions and returns relative dimensions. Returns dimensions normalized scales in", "0 to be background. Returns list of length n_channel with mean intensity of", "to set layer as background layer select_channel(channel: int) -> np.array: Helper method expects", "= self.metadata[\"images\"][self.series_index] self.metadata[\"original_filename\"] = original_filename # save zarr fsr.save_zarr(self.data, db_image.path_image) # save metadata", "series_index : int index of image if multiple images were imported in a", "(save to db and file storage) Returns IntResultMeasurement object: measurement.measurement has shape: (n_labels,", "image_shape[-1]): _mask = np.zeros((image_shape[0], image_shape[-2], image_shape[-1])) _mask[:,...] = mask int_result_layer = IntImageResultLayer( uid=-1,", "by bioformats import when image was imported has_bg_layer : bool False by default.", "method expects channel index. Returns deep copy of channel with shape (z,y,x). Parameters:", "return value from utils_paths.make_thumbnail_path Returns path as pathlib.Path ''' return utils_paths.fileserver.joinpath(utils_paths.make_thumbnail_path(self.uid)) def get_image_scaling(self):", "objects name series_index : int if multiple images are imported via one file", "def add_layer_from_mask(self, path: Path): mask = utils_import.read_mask(path) if type(mask) == type(None): warnings.warn(\"Image could", "warnings.warn(\"Image could not be read!\") return None else: image_shape = self.data.shape if mask.shape", "list by default. 
List of all associated DbResultMeasurement objects tags: Set[str] = []", "db. sets bg_layer_id to given value. create_in_db(sess = None): creates object in database,", "select_channel(channel: int) -> np.array: Helper method expects channel index. Returns deep copy of", "will be saved to .json and loaded. hint : str, optional empty string", "Uses default session if none is passed. ''' uid: int series_index: int name:", "default. Indicates if image as layer selected as background_layer. bg_layer_id : int, optional", "image has an associated background layer. bg_layer_id: int, optional None if no associated", "uid: int series_index: int name: str hint: str = \"\" has_bg_layer: bool =", "None if id is not found in self.image_result_layers calculate_background() -> List: Returns list", "''' This function expects a new hint as string and calls crud.update_image_hint to", "layer_id(int): Id of result layer to be deleted. ''' layer = self.select_result_layer(layer_id) if", "handle calculations and other internal operations with images. Attributes ---------- uid : int", "index of the image is stored here metadata : dict reduced metadata for", "string by default. brief description of the object experiment_ids: List[int] empty list by", "Path): mask = utils_import.read_mask(path) if type(mask) == type(None): warnings.warn(\"Image could not be read!\")", "== -1 and as imported Mistos image if \"uid\" == -2. Creates image", "of experiments_group ids which use the image. image_result_layers : List[IntImageResultLayer] empty list by", "information from db and updates the object's attributes accordingly. Does not reload image", "self.measure_mask_in_image(int_result_layer.uid) elif mask.shape == (1, image_shape[-2], image_shape[-1]): _mask = np.zeros((image_shape[0], image_shape[-2], image_shape[-1])) _mask[:,...]", "result layer by uid. 
If result layer is selected as background layer, the", "= np.array([z, y, x]) dims = dims/dims.max() elif n_z == 1: dims =", "xml path_xml = utils_paths.make_metadata_xml_path_from_json_path( db_image.path_metadata) metadata_string = self.metadata_omexml.to_xml(encoding=\"utf-8\") metadata_string = xml.dom.minidom.parseString( metadata_string).toprettyxml(indent=\"\\t\") fsr.save_metadata_xml(metadata_string,", "background layer. ''' layer_uid = image_layer.uid db_image = self.to_db_class() db_image.set_bg_true(layer_uid) self.refresh_from_db() def select_channel(self,", "= copy.deepcopy(self.data[:, channel, ...])[ :, np.newaxis, ...] return channel_data def select_result_layer(self, uid: int):", "from typing import Any, List, Optional, Set import numpy as np from app", "returns object as com_class. set_bg_false(sess = None) sets \"has_bg_layer\" property to False in", "return None def calculate_background(self): ''' Expects the bg_uid to belong to a result", "UID} refresh_from_db(): Fetches data of this image from db and updates the objects", "\"uid\" == -1 and as imported Mistos image if \"uid\" == -2. Creates", "measure mask and save result as ResultMeasurement. Creates measurement object and initializes it", "utils_results from app.api.classes.image_result_layer import (DbImageResultLayer, IntImageResultLayer) from app.api.classes.result_measurement import (DbResultMeasurement, IntResultMeasurement) from app.api.classes_com", "no session is passed default session will be used (app.api.dependencies.get_db) ''' sess =", "hint=self.hint, path_metadata=None, path_image=None, has_bg_layer=self.has_bg_layer, bg_layer_id=self.bg_layer_id, experiment_ids=self.experiment_ids, image_result_layers=db_image_result_layers, result_measurements=db_result_measurements, tags=self.tags ) return db_image def", "if image as layer selected as background_layer. 
bg_layer_id : int, optional None if", "normalized scales in array with shape (z,y,x) or None if no scaling information", "reloaded from db. ''' db_image = self.to_db_class() db_image.set_bg_false() self.refresh_from_db() def set_bg_true(self, image_layer: IntImageResultLayer):", "index of channel to be selected. ''' channel_data = copy.deepcopy(self.data[:, channel, ...])[ :,", "Returns dictionary of format {\"UID_NAME\": UID}. Mainly for use in napari viewer. Parameters:", "(app.api.dependencies.get_db). ''' sess = check_sess(sess) crud.update_image_bg_false(self.uid, sess) def set_bg_true(self, layer_uid: int, sess=None): '''", "name=f\"ground_truth_estimation{suffix}\", hint=hint, image_id=self.uid, layer_type=\"labels\", data=ground_truth_estimation_array ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) def add_layer_from_roi(self, path: Path):", "pathlib.Path, optional path to the images array \".zarr\" folder. Automatically generated as image", "y = self.metadata['pixel_size_physical_y'] z = self.metadata['pixel_size_physical_z'] n_z = self.metadata['pixel_size_z'] if n_z > 1:", "del kwargs[\"path_image\"] kwargs[\"metadata\"] = metadata kwargs[\"image_result_layers\"] = [image_result_layer.to_int_class() for image_result_layer in self.image_result_layers] kwargs[\"result_measurements\"]", "metadata \".json\". Automatically generated as image is saved to database. path_image: pathlib.Path, optional", "list of strings. opens metadata.json and edits custom_channel_names Parameters: - channel_names(List[str]): List of", "int if multiple images are imported via one file (image series), the index", "''' if self.uid == -1: db_image = self.to_db_class() db_image.create_in_db() self.uid = db_image.uid original_filename", "database. path_image: pathlib.Path, optional path to the images array \".zarr\" folder. Automatically generated", "STAPLE algorithm to estimate ground truth. 
Resulting layer will be initialized as IntResultLayer.", "layer will be turned to binary, assuming all labels > 0 to be", "= utils_import.generate_thumbnail(self.data) thumbnail_path = self.get_thumbnail_path() fsr.save_thumbnail(thumbnail, thumbnail_path) def get_thumbnail_path(self): ''' Helper function which", "strings to be saved as channel names. ''' metadata = fsr.load_json(self.path_metadata) metadata[\"custom_channel_names\"] =", "which the image is stored. Is loaded from .zarr files, most numpy operations", "pydantic import BaseModel, constr from pathlib import Path class DbImage(BaseModel): ''' A class", "0 will be unified and represented as foreground (==1) for calculation. Saves label", "int series_index: int name: str hint: str = \"\" has_bg_layer: bool = False", "session will be used (app.api.dependencies.get_db) ''' sess = check_sess(sess) updated_db_image = crud.read_image_by_uid( self.uid,", "self.data layer = self.select_result_layer(layer_id) measurement, measurement_summary = utils_results.calculate_measurement( image_array, layer.data) measurement_result = IntResultMeasurement(", "id of the background layer. path_metadata: pathlib.Path, optional path to the images metadata", "layer select_channel(channel: int) -> np.array: Helper method expects channel index. Returns deep copy", "image if not already loaded if for_refresh == False: data = fsr.load_zarr(kwargs[\"path_image\"]) kwargs[\"data\"]", "were used to estimate the ground truth: {layer_id_list}\" int_result_layer = IntImageResultLayer( uid=-1, name=f\"ground_truth_estimation{suffix}\",", "name series_index : int if multiple images are imported via one file (image", "data as read by bioformats import when image was imported has_bg_layer : bool", "all associated IntResultMeasurement objects tags : Set[str]: empty set by default. 
Set of", "fsr.save_metadata_xml(metadata_string, path_xml) elif self.uid == -2: db_image = self.to_db_class() db_image.create_in_db() self.uid = db_image.uid", "layer and saves it to db and file storage. get_classifiers(clf_type: str) -> dict:", "shape (z,y,x) or None if no scaling information was provided in metadata. '''", "- layer_id(int): uid of the layer to be measured ''' image_array = self.data", "of layer ids to be used for ground truth estimation. Must belong to", "string to be saved. - sess(sqlalchemy.orm.Session): The database session to be used, if", "name. ''' if suffix == None: suffix = \"\" else: suffix = \"_\"", "def update_channel_names(self, channel_names: List[str]): ''' This function expects a new channel names as", "[] tags: Set[str] = [] def to_int_class(self, for_refresh=False): ''' Returns object as int", "-2: db_image = self.to_db_class() db_image.create_in_db() self.uid = db_image.uid print(f\"Importing archived Mistos image with", "np.where(bg_mask > 0, 1, 0) n_pixel = bg_mask.sum() n_channel = self.data.shape[1] mean_pixel =", "= None metadata = fsr.load_json(self.path_metadata) metadata_omexml_path = utils_paths.make_metadata_xml_path_from_json_path( self.path_metadata) kwargs[\"metadata_omexml\"] = fsr.load_metadata_xml(metadata_omexml_path) del", "n_features), n_features == 2 (n_pixels, sum_pixels) Parameters: - layer_id(int): uid of the layer", "fsr from app.api import utils_import, utils_paths, utils_results from app.api.classes.image_result_layer import (DbImageResultLayer, IntImageResultLayer) from", "bg_layer_id=self.bg_layer_id, experiment_ids=self.experiment_ids, image_result_layers=db_image_result_layers, result_measurements=db_result_measurements, tags=self.tags ) return db_image def set_bg_false(self): ''' Helper function", "= xml.dom.minidom.parseString( metadata_string).toprettyxml(indent=\"\\t\") fsr.save_metadata_xml(metadata_string, path_xml) elif self.uid == -2: db_image = self.to_db_class() db_image.create_in_db()", 
"images are imported via one file (image series), the index of the image", "by default. List of experiments_group ids which use the image. image_result_layers : List[IntImageResultLayer]", "= updated_info.bg_layer_id def delete_result_layer(self, layer_id: int): ''' Method to delete a result layer", "== 2 (n_pixels, sum_pixels) Parameters: - layer_id(int): uid of the layer to be", "com_class. set_bg_false(sess = None) sets \"has_bg_layer\" property to False in db. set_bg_true(layer_uid: int,", "passed. ''' uid: int series_index: int name: str hint: str = \"\" has_bg_layer:", "database and file storage. Uses default session if none is passed. ''' uid:", "by default. Set of keywords to work with in the frontend data :", "= self.data layer = self.select_result_layer(layer_id) measurement, measurement_summary = utils_results.calculate_measurement( image_array, layer.data) measurement_result =", "[image_result_layer.to_com_class() for image_result_layer in self.image_result_layers] kwargs[\"measurements\"] = [measurement.to_com_class() for measurement in self.measurements] kwargs[\"seriesIndex\"]", "image_id=self.uid, result_layer_id=layer.uid, measurement=measurement, measurement_summary=measurement_summary ) measurement_result.on_init() self.refresh_from_db() return measurement_result def get_classifiers(self, clf_type: str):", "for_refresh=True) return updated_db_image def update_hint(self, new_hint: str, sess=None): ''' This function expects a", "to be used as background layer. - sess(sqlalchemy.orm.Session): The database session to be", "multiple layers with by SimpleITK's STAPLE probabilities. For ground truth estimation layer will", "> 0: return layers[0] else: warnings.warn( f\"IntImage.select channel could not select layer with", "''' calls crud.delete_image and passed db_image object to delete all associated files and", "which returns path to the thumbnail on fileserver. 
Gets gets fileserver path and", "in self.result_measurements] db_image = DbImage( uid=self.uid, series_index=self.series_index, name=self.name, hint=self.hint, path_metadata=None, path_image=None, has_bg_layer=self.has_bg_layer, bg_layer_id=self.bg_layer_id,", "thumbnail on fileserver. Gets gets fileserver path and joins with return value from", "Any metadata_omexml: Any has_bg_layer: bool = False bg_layer_id: Optional[int] def on_init(self): ''' Method", "layer. - sess(sqlalchemy.orm.Session): The database session to be used, if no session is", "Thereafter, only image's metadata will be saved to .json and loaded. hint :", "False and bg_layer_id to None. set_bg_true(image_layer: app.api.classes_int.IntImageResultLayer): Method to set layer as background", ") return db_image def set_bg_false(self): ''' Helper function to set has_bg_layer to False", "bg_layer.data else: bg_mask = np.zeros(( self.data.shape[0], self.data.shape[2], self.data.shape[3] )) if bg_mask.max() < 2:", "= fsr.load_json(self.path_metadata) metadata_omexml_path = utils_paths.make_metadata_xml_path_from_json_path( self.path_metadata) kwargs[\"metadata_omexml\"] = fsr.load_metadata_xml(metadata_omexml_path) del kwargs[\"path_metadata\"] del kwargs[\"path_image\"]", "self.path_image = Path(sql_image.path_image) self.path_metadata = Path(sql_image.path_metadata) def refresh_from_db(self, sess=None): ''' Refreshes object image", "set_bg_true(image_layer: app.api.classes_int.IntImageResultLayer): Method to set layer as background layer select_channel(channel: int) -> np.array:", "unified and represented as foreground (==1) for calculation. Saves label layer to image,", "ground truth. Resulting layer will be initialized as IntResultLayer. add_layer_from_roi(path) add_layer_from_mask(path) ''' uid:", "Optional[str] = \"\" experiment_ids: List[int] = [] image_result_layers: List[IntImageResultLayer] = [] result_measurements: List[IntResultMeasurement]", "given value. 
create_in_db(sess = None): creates object in database, updates objects path and", "layer_id in layer_id_list] ground_truth_estimation_array = utils_results.staple_gte( label_array_list) hint = f\"Following Label Layers were", "int_result_layer = IntImageResultLayer( uid=-1, name=f\"ground_truth_estimation{suffix}\", hint=hint, image_id=self.uid, layer_type=\"labels\", data=ground_truth_estimation_array ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid)", "mean intensity of measured pixels. ''' if self.has_bg_layer: bg_uid = self.bg_layer_id bg_layer =", "with corresponding id, returns None if id is not found in self.image_result_layers calculate_background()", "are reset. estimate_ground_truth_layer(layer_id_list: List[int], suffix: str): Fetches given layers and uses SimpleITKs STAPLE", "''' x = self.metadata['pixel_size_physical_x'] y = self.metadata['pixel_size_physical_y'] z = self.metadata['pixel_size_physical_z'] n_z = self.metadata['pixel_size_z']", "= \"\" empty string by default. brief description of the object has_bg_layer: bool", "Uses default session if none is passed. update_channel_names(channel_names: List[str]) edits \"custom_channel_names\" attribute of", ": bool False by default. Indicates if image as layer selected as background_layer.", "handle database and file storage of Images Attributes ---------- uid : int the", "Transforms internal class representation to db class representation. ''' db_image_result_layers = [result_layer.to_db_class() for", "DbImage( uid=self.uid, series_index=self.series_index, name=self.name, hint=self.hint, path_metadata=None, path_image=None, has_bg_layer=self.has_bg_layer, bg_layer_id=self.bg_layer_id, experiment_ids=self.experiment_ids, image_result_layers=db_image_result_layers, result_measurements=db_result_measurements, tags=self.tags", "list of length n_channels. 
List holds the mean pixel values for each channel", "of string keywords to easily categorize objects in frontend. Methods ------- to_int_class()->app.api.classes_internal.IntImage: returns", "mask = utils_import.read_roi(path, self.data.shape) int_result_layer = IntImageResultLayer( uid=-1, name=f\"{path.name}\", hint=\"imported maks\", image_id=self.uid, layer_type=\"labels\",", "delete_result_layer(layer_id: int): Deletes the layer from database, file storage and the image. If", "ground truth estimation layer will be binarized, all labels > 0 will be", "function which returns path to the thumbnail on fileserver. get_image_scaling(): Returns dimensions normalized", "has_bg_layer property to False in database. Parameters: - sess(sqlalchemy.orm.Session): The database session to", "from db and updates the object's attributes accordingly. Does not reload image data", "refresh_from_db(): Fetches data of this image from db and updates the objects attributes.", "= False): If True, image array is not reloaded from file storage. '''", "= self.metadata['pixel_size_physical_y'] z = self.metadata['pixel_size_physical_z'] n_z = self.metadata['pixel_size_z'] if n_z > 1: dims", "self.refresh_from_db() def estimate_ground_truth_layer(self, layer_id_list: List[int], suffix: str = None): ''' Method to estimate", "clf_dict def refresh_from_db(self): ''' Requests current information from db and updates the object's", "image is saved to database. 
path_image: pathlib.Path, optional path to the images array", ") int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) def add_layer_from_roi(self, path: Path): mask = utils_import.read_roi(path, self.data.shape) int_result_layer", "layer as background layer select_channel(channel: int) -> np.array: Helper method expects channel index.", "bg_mask = bg_layer.data else: bg_mask = np.zeros(( self.data.shape[0], self.data.shape[2], self.data.shape[3] )) if bg_mask.max()", "kwargs[\"metadata\"] = metadata kwargs[\"image_result_layers\"] = [image_result_layer.to_int_class() for image_result_layer in self.image_result_layers] kwargs[\"result_measurements\"] = [measurement.to_int_class()", "measurements: List[DbResultMeasurement] = [] emtpy list by default. List of all associated DbResultMeasurement", "...])[ :, np.newaxis, ...] return channel_data def select_result_layer(self, uid: int): layers = [_", "sess) self.uid = sql_image.id self.path_image = Path(sql_image.path_image) self.path_metadata = Path(sql_image.path_metadata) def refresh_from_db(self, sess=None):", "edits custom_channel_names Parameters: - channel_names(List[str]): List of strings to be saved as channel", "otherwise id of the background layer. path_metadata: pathlib.Path, optional path to the images", "to be used for ground truth estimation. Must belong to this image. -", "= [] set of string keywords to easily categorize objects in frontend. Methods", "Automatically generated as image is saved to database. 
path_image: pathlib.Path, optional path to", "image_id=self.uid, layer_type=\"labels\", data=ground_truth_estimation_array ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) def add_layer_from_roi(self, path: Path): mask =", "n in range(n_channel): channel_data = self.select_channel(n) selection = np.where(bg_mask, channel_data, 0) _mean =", "{layer_id_list}\" int_result_layer = IntImageResultLayer( uid=-1, name=f\"ground_truth_estimation{suffix}\", hint=hint, image_id=self.uid, layer_type=\"labels\", data=ground_truth_estimation_array ) int_result_layer.on_init() self.refresh_from_db()", "ids which use the image. image_result_layers : List[IntImageResultLayer] empty list by default. List", "sess) def create_in_db(self, sess=None): ''' Creates object in db. Paths and id are", "file storage) Returns IntResultMeasurement object: measurement.measurement has shape: (n_labels, n_channel, n_features), n_features ==", "utils_import.generate_thumbnail(self.data) thumbnail_path = self.get_thumbnail_path() fsr.save_thumbnail(thumbnail, thumbnail_path) def get_thumbnail_path(self): ''' Helper function which returns", "measurement.measurement has shape: (n_labels, n_channel, n_features), n_features == 2 (n_pixels, sum_pixels) Parameters: -", "layer to be used as background layer. - sess(sqlalchemy.orm.Session): The database session to", "bg_layer_id Methods ------- on_init(): Initializes object. Object is saved in database and file", "IntImage(BaseModel): ''' A class to handle calculations and other internal operations with images.", "layer as background layer. Parameters: - image_layer(app.api.classes_int.IntImageImageResultLayer): Layer to be selected as background", "property to given value. Parameters: - layer_uid(int): uid of result layer to be", "Creates object in db. 
Paths and id are generated and updated in object.", "\"custom_channel_names\" attribute of image in it's metadata.json delete_from_system(sess = None): deletes object in", "List of layer ids to be used for ground truth estimation. Must belong", "measured ''' image_array = self.data layer = self.select_result_layer(layer_id) measurement, measurement_summary = utils_results.calculate_measurement( image_array,", "def to_com_class(self): ''' Returns obect as com class. ''' kwargs = self.dict() kwargs[\"metadata\"]", "...] return channel_data def select_result_layer(self, uid: int): layers = [_ for _ in", "corresponding id, returns None if id is not found in self.image_result_layers calculate_background() ->", "the objects name hint : str = \"\" empty string by default. brief", "relative dimensions. Returns dimensions normalized scales in array with shape (z,y,x) or None", "int): ''' Helper method expects channel index. Returns deep copy of channel with", "bg_layer_id to None. set_bg_true(image_layer: app.api.classes_int.IntImageResultLayer): Method to set layer as background layer select_channel(channel:", "path_xml = utils_paths.make_metadata_xml_path_from_json_path( db_image.path_metadata) # metadata_string = self.metadata_omexml.to_xml(encoding=\"utf-8\") metadata_omexml = self.metadata_omexml.toprettyxml(indent=\"\\t\") fsr.save_metadata_xml(metadata_omexml, path_xml)", "stored here metadata : dict reduced metadata for easy use within mistos. As", "self.to_db_class() db_image.set_bg_true(layer_uid) self.refresh_from_db() def select_channel(self, channel: int): ''' Helper method expects channel index.", "int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) def add_layer_from_roi(self, path: Path): mask = utils_import.read_roi(path, self.data.shape) int_result_layer =", "to database. image_result_layers: List[DbImageResultLayer] = [] emtpy list by default. List of all", "[] emtpy list by default. 
List of all associated DbImageResultLayer objects measurements: List[DbResultMeasurement]", "== None: suffix = \"\" else: suffix = \"_\" + suffix label_array_list =", "DbImageResultLayer objects measurements: List[DbResultMeasurement] = [] emtpy list by default. List of all", "metadatadict is passed into IntImage.on_init(). Thereafter, only image's metadata will be saved to", "self.metadata['pixel_size_physical_z'] n_z = self.metadata['pixel_size_z'] if n_z > 1: dims = np.array([z, y, x])", "ground truth from multiple layers with by SimpleITK's STAPLE probabilities. For ground truth", "def estimate_ground_truth_layer(self, layer_id_list: List[int], suffix: str = None): ''' Method to estimate ground", "utils_paths.make_metadata_xml_path_from_json_path( db_image.path_metadata) # metadata_string = self.metadata_omexml.to_xml(encoding=\"utf-8\") metadata_omexml = self.metadata_omexml.toprettyxml(indent=\"\\t\") fsr.save_metadata_xml(metadata_omexml, path_xml) # save", "''' Method to estimate ground truth from multiple layers with by SimpleITK's STAPLE", "objects unique identifier name : str the objects name series_index : int if", "it's metadata.json delete_from_system(sess = None): deletes object in database and file storage. Uses", "updated in object. Parameters: - sess(sqlalchemy.orm.Session): The database session to be used, if", "calculate_background(self): ''' Expects the bg_uid to belong to a result layer of this", "select_result_layer(self, uid: int): layers = [_ for _ in self.image_result_layers if _.uid ==", "image hint. Parameters: - new_hint(str): string to be saved. - sess(sqlalchemy.orm.Session): The database", "used (app.api.dependencies.get_db). ''' sess = check_sess(sess) crud.update_image_bg_true(self.uid, layer_uid, sess) def create_in_db(self, sess=None): '''", "index. Returns deep copy of channel with shape (z,y,x). 
select_result_layer(uid: int) -> app.api.classes_internal.IntResultLayer", "Helper function to set has_bg_layer to False and bg_layer_id to None. Attribute is", "layer_id: int): ''' Method to measure mask and save result as ResultMeasurement. Creates", "thumbnail_path) def get_thumbnail_path(self): ''' Helper function which returns path to the thumbnail on", "= [crud.read_result_layer_by_uid( layer_id).to_int_class().data for layer_id in layer_id_list] ground_truth_estimation_array = utils_results.staple_gte( label_array_list) hint =", "= metadata kwargs[\"image_result_layers\"] = [image_result_layer.to_int_class() for image_result_layer in self.image_result_layers] kwargs[\"result_measurements\"] = [measurement.to_int_class() for", "+ suffix label_array_list = [crud.read_result_layer_by_uid( layer_id).to_int_class().data for layer_id in layer_id_list] ground_truth_estimation_array = utils_results.staple_gte(", "default session will be used (app.api.dependencies.get_db) ''' sess = check_sess(sess) sql_image = crud.create_image(self,", "uid of the layer to be measured ''' image_array = self.data layer =", "if type(mask) == type(None): warnings.warn(\"Image could not be read!\") return None else: image_shape", "self.uid = db_image.uid print(f\"Importing archived Mistos image with id {self.uid}\") fsr.save_zarr(self.data, db_image.path_image) fsr.save_json(self.metadata,", "int, sess = None) sets \"has_bg_layer\" property to True in db. sets bg_layer_id", "''' if self.has_bg_layer: bg_uid = self.bg_layer_id bg_layer = self.select_result_layer(bg_uid) bg_mask = bg_layer.data else:", "obect as com class. 
''' kwargs = self.dict() kwargs[\"metadata\"] = utils_import.load_metadata_only( self.path_metadata) kwargs[\"imageResultLayers\"]", "data else: kwargs[\"data\"] = None metadata = fsr.load_json(self.path_metadata) metadata_omexml_path = utils_paths.make_metadata_xml_path_from_json_path( self.path_metadata) kwargs[\"metadata_omexml\"]", "None: suffix = \"\" else: suffix = \"_\" + suffix label_array_list = [crud.read_result_layer_by_uid(", "image is stored here metadata : dict reduced metadata for easy use within", "= original_filename # save zarr fsr.save_zarr(self.data, db_image.path_image) # save metadata dict fsr.save_json(self.metadata, db_image.path_metadata)", "optional None if no associated background layer, otherwise id of the background layer.", "image from database and returns DbImage object. update_hint(new_hint: str, sess = None): updates", "of image in it's metadata.json delete_from_system(sess = None): deletes object in database and", "layer ids to be used for ground truth estimation. Must belong to this", "object in db. Paths and id are generated and updated in object. Parameters:", "measurement object for given result layer and saves it to db and file", "multiple images are imported via one file (image series), the index of the", "List of all associated IntImageResultLayer objects. result_measurements : List[IntResultMeasurement] empty list by default.", "session to be used, if no session is passed default session will be", "selection = np.where(bg_mask, channel_data, 0) _mean = selection.sum()/n_pixel mean_pixel.append(_mean) return mean_pixel def measure_mask_in_image(self,", "Mainly for use in napari viewer. 
Parameters: - clf_type(str): Valid classifier type, for", "storage of Images Attributes ---------- uid : int the objects unique identifier series_index", "= check_sess(sess) crud.update_image_hint(self.uid, new_hint, sess) def update_channel_names(self, channel_names: List[str]): ''' This function expects", "the object experiment_ids: List[int] empty list by default. List of experiments_group ids which", "viewer. Parameters: - clf_type(str): Valid classifier type, for available types see app.api.cfg_classes.classifier_types. '''", "and id. ''' if self.uid == -1: db_image = self.to_db_class() db_image.create_in_db() self.uid =", "int, optional None if no bg_layer selected, otherwise it holds the bg_layer_id Methods", "as new image if \"uid\" == -1 and as imported Mistos image if", "used, if no session is passed default session will be used (app.api.dependencies.get_db) '''", "utils_results.staple_gte( label_array_list) hint = f\"Following Label Layers were used to estimate the ground", "Set import numpy as np from app import crud from app import fileserver_requests", "name: str series_index: int metadata: dict hint: Optional[str] = \"\" experiment_ids: List[int] =", "-> dict: Fetches all saved classifiers from db, filters for type and returns", "layer array from file path in the process. to_com_class()->app.api.classes_com.ComImage: returns object as com_class.", "of Images Attributes ---------- uid : int the objects unique identifier series_index :", "self.set_bg_false() layer.delete() self.refresh_from_db() def estimate_ground_truth_layer(self, layer_id_list: List[int], suffix: str = None): ''' Method", "and file storage) Returns IntResultMeasurement object: measurement.measurement has shape: (n_labels, n_channel, n_features), n_features", "bg_layer_id: int, optional None if no associated background layer, otherwise id of the", "of shape (z,c,y,x) in which the image is stored. Is loaded from .zarr", "with by SimpleITK's STAPLE probabilities. 
For ground truth estimation layer will be binarized,", "channel could not select layer with id {uid}.\\nThis image has the associated layers", "a result layer by uid. If result layer is selected as background layer,", "the layer from database, file storage and the image. If layer was background_layer,", "= [image_result_layer.to_com_class() for image_result_layer in self.image_result_layers] kwargs[\"measurements\"] = [measurement.to_com_class() for measurement in self.measurements]", "Fetches dict in form {name: id} clf_dict = crud.read_classifier_dict_by_type(clf_type) if clf_dict == {}:", "found\"] = None return clf_dict def refresh_from_db(self): ''' Requests current information from db", "result as ResultMeasurement. Creates measurement object and initializes it (save to db and", "to True in db. sets bg_layer_id to given value. create_in_db(sess = None): creates", "not be read!\") return None else: image_shape = self.data.shape if mask.shape == (image_shape[0],", "= list(self.tags) return ComImage(**kwargs) def set_bg_false(self, sess=None): ''' Sets imagaes has_bg_layer property to", "Optional[int] def on_init(self): ''' Method to initialize the object. Handles image as new", "given type from database. Returns dictionary of format {\"UID_NAME\": UID}. Mainly for use", "file storage. Parameters: - layer_id_list(List[int]): List of layer ids to be used for", "if no background layer is defined. measure_mask_in_image(layer_id: int) -> app.api.classes_int.IntMeasurementResult: Returns measurement object", "the full image if not already loaded if for_refresh == False: data =", "kwargs[\"measurements\"] = [measurement.to_com_class() for measurement in self.measurements] kwargs[\"seriesIndex\"] = self.series_index kwargs[\"hasBgLayer\"] = self.has_bg_layer", "this image from db and updates the objects attributes. 
delete_result_layer(layer_id: int): Deletes the", "n_z > 1: dims = np.array([z, y, x]) dims = dims/dims.max() elif n_z", "app.api.classes_com import ComImage from app.api.dependencies import check_sess from pydantic import BaseModel, constr from", "Method to initialize the object. Handles image as new image if \"uid\" ==", "as string and calls crud.update_image_hint to update the image hint. Parameters: - new_hint(str):", "attribute of image in it's metadata.json delete_from_system(sess = None): deletes object in database", "Path(sql_image.path_image) self.path_metadata = Path(sql_image.path_metadata) def refresh_from_db(self, sess=None): ''' Refreshes object image from db.", "database which generates path and id. ''' if self.uid == -1: db_image =", "updated_info.tags self.has_bg_layer = updated_info.has_bg_layer self.bg_layer_id = updated_info.bg_layer_id def delete_result_layer(self, layer_id: int): ''' Method", "which use the image. image_result_layers : List[IntImageResultLayer] empty list by default. List of", "Returns measurement object for given result layer and saves it to db and", ": List[IntResultMeasurement] empty list by default. List of all associated IntResultMeasurement objects tags", "[] set of string keywords to easily categorize objects in frontend. Methods -------", "measurement, measurement_summary = utils_results.calculate_measurement( image_array, layer.data) measurement_result = IntResultMeasurement( uid=-1, name=utils_paths.make_measurement_name(self.name, layer.name), hint=\"\",", "False in database. Parameters: - sess(sqlalchemy.orm.Session): The database session to be used, if", "label_array_list = [crud.read_result_layer_by_uid( layer_id).to_int_class().data for layer_id in layer_id_list] ground_truth_estimation_array = utils_results.staple_gte( label_array_list) hint", "None. Parameters: - layer_id(int): Id of result layer to be deleted. 
''' layer", "passed default session will be used (app.api.dependencies.get_db) ''' sess = check_sess(sess) crud.update_image_hint(self.uid, new_hint,", "list of length n_channel with mean intensity of measured pixels. ''' if self.has_bg_layer:", "in form {name: id} clf_dict = crud.read_classifier_dict_by_type(clf_type) if clf_dict == {}: clf_dict[\"No classifers", "to the thumbnail on fileserver. get_image_scaling(): Returns dimensions normalized scales in array with", "by default. Indicates if image as layer selected as background_layer. bg_layer_id : int,", "in object. Parameters: - sess(sqlalchemy.orm.Session): The database session to be used, if no", "is passed into IntImage.on_init(). Thereafter, only image's metadata will be saved to .json", "if bg_mask.max() < 2: bg_mask = np.where(bg_mask > 0, 1, 0) n_pixel =", "hint. Parameters: - new_hint(str): string to be saved. - sess(sqlalchemy.orm.Session): The database session", "''' Returns object as int class. Parameters: - for_refresh(bool = False): If True,", "file (image series), the index of the image is stored here metadata :", "and file storage get_thumbnail_path(): Helper function which returns path to the thumbnail on", "as background_layer. bg_layer_id : int, optional None if no bg_layer selected, otherwise it", "Path class DbImage(BaseModel): ''' A class to handle database and file storage of", "attributes accordingly. Does not reload image data again ''' db_image = self.to_db_class() updated_info", "self.measure_mask_in_image(int_result_layer.uid) def add_layer_from_mask(self, path: Path): mask = utils_import.read_mask(path) if type(mask) == type(None): warnings.warn(\"Image", "str, sess=None): ''' This function expects a new hint as string and calls", "None\") return dims def to_db_class(self): ''' Transforms internal class representation to db class", "''' Helper function which returns path to the thumbnail on fileserver. Gets gets", "(z,y,x). 
select_result_layer(uid: int) -> app.api.classes_internal.IntResultLayer | None: Returns layer with corresponding id, returns", "in metadata. ''' x = self.metadata['pixel_size_physical_x'] y = self.metadata['pixel_size_physical_y'] z = self.metadata['pixel_size_physical_z'] n_z", "Does not reload image data again ''' db_image = self.to_db_class() updated_info = db_image.refresh_from_db()", "DbResultMeasurement objects tags: Set[str] = [] set of string keywords to easily categorize", "del kwargs[\"path_metadata\"] del kwargs[\"path_image\"] kwargs[\"metadata\"] = metadata kwargs[\"image_result_layers\"] = [image_result_layer.to_int_class() for image_result_layer in", "Path(sql_image.path_metadata) def refresh_from_db(self, sess=None): ''' Refreshes object image from db. Parameters: - sess(sqlalchemy.orm.Session):", "False bg_layer_id: Optional[int] path_metadata: Optional[Path] path_image: Optional[Path] image_result_layers: List[DbImageResultLayer] = [] measurements: List[DbResultMeasurement]", "opens metadata.json and edits custom_channel_names Parameters: - channel_names(List[str]): List of strings to be", "As created by app.api.utils_import.acquire_metadata_dict (creates metadata dict for whole series) Series metadatadict is", "Creates image in database which generates path and id. ''' if self.uid ==", "= self.to_db_class() db_image.set_bg_true(layer_uid) self.refresh_from_db() def select_channel(self, channel: int): ''' Helper method expects channel", "to be saved. - sess(sqlalchemy.orm.Session): The database session to be used, if no", "sess = check_sess(sess) sql_image = crud.create_image(self, sess) self.uid = sql_image.id self.path_image = Path(sql_image.path_image)", "is stored. Is loaded from .zarr files, most numpy operations work, some may", "to_int_class()->app.api.classes_internal.IntImage: returns object as int_class. 
Loads layer array from file path in the", "= self.data.shape if mask.shape == (image_shape[0], image_shape[-2], image_shape[-1]): int_result_layer = IntImageResultLayer( uid=-1, name=f\"{path.name}\",", "estimate_ground_truth_layer(layer_id_list: List[int], suffix: str): Fetches given layers and uses SimpleITKs STAPLE algorithm to", "found in self.image_result_layers calculate_background() -> List: Returns list of length n_channels. List holds", "has_bg_layer: bool = False bg_layer_id: Optional[int] path_metadata: Optional[Path] path_image: Optional[Path] image_result_layers: List[DbImageResultLayer] =", "= db_image.uid original_filename = self.metadata[\"original_filename\"] self.metadata = self.metadata[\"images\"][self.series_index] self.metadata[\"original_filename\"] = original_filename # save", "name : str the objects name series_index : int if multiple images are", "0) n_pixel = bg_mask.sum() n_channel = self.data.shape[1] mean_pixel = [] for n in", "{uid}.\\nThis image has the associated layers \\n{layers}\", UserWarning) return None def calculate_background(self): '''", "= self.metadata_omexml.to_xml(encoding=\"utf-8\") metadata_omexml = self.metadata_omexml.toprettyxml(indent=\"\\t\") fsr.save_metadata_xml(metadata_omexml, path_xml) # save thumbnail thumbnail = utils_import.generate_thumbnail(self.data)", "path_xml) elif self.uid == -2: db_image = self.to_db_class() db_image.create_in_db() self.uid = db_image.uid print(f\"Importing", "dict in form {name: id} clf_dict = crud.read_classifier_dict_by_type(clf_type) if clf_dict == {}: clf_dict[\"No", "= updated_info.result_measurements self.tags = updated_info.tags self.has_bg_layer = updated_info.has_bg_layer self.bg_layer_id = updated_info.bg_layer_id def delete_result_layer(self,", "from file storage. ''' kwargs = self.dict() # Only load the full image", "be background. 
Returns list of length n_channel with mean intensity of measured pixels.", "self.image_result_layers] kwargs[\"measurements\"] = [measurement.to_com_class() for measurement in self.measurements] kwargs[\"seriesIndex\"] = self.series_index kwargs[\"hasBgLayer\"] =", "bg_uid = self.bg_layer_id bg_layer = self.select_result_layer(bg_uid) bg_mask = bg_layer.data else: bg_mask = np.zeros((", "layer_type=\"labels\", data=_mask ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) else: warnings.warn( f\"Mask shape {mask.shape} does not", "be measured ''' image_array = self.data layer = self.select_result_layer(layer_id) measurement, measurement_summary = utils_results.calculate_measurement(", "file path in the process. to_com_class()->app.api.classes_com.ComImage: returns object as com_class. set_bg_false(sess = None)", "updated_info.image_result_layers self.result_measurements = updated_info.result_measurements self.tags = updated_info.tags self.has_bg_layer = updated_info.has_bg_layer self.bg_layer_id = updated_info.bg_layer_id", "representation. ''' db_image_result_layers = [result_layer.to_db_class() for result_layer in self.image_result_layers] db_result_measurements = [measurement.to_db_class() for", "no background layer is defined. measure_mask_in_image(layer_id: int) -> app.api.classes_int.IntMeasurementResult: Returns measurement object for", "self.metadata_omexml.to_xml(encoding=\"utf-8\") metadata_string = xml.dom.minidom.parseString( metadata_string).toprettyxml(indent=\"\\t\") fsr.save_metadata_xml(metadata_string, path_xml) elif self.uid == -2: db_image =", "If result layer is selected as background layer, the attributes \"has_bg_layer\" and \"bg_layer_id\"", "else: image_shape = self.data.shape if mask.shape == (image_shape[0], image_shape[-2], image_shape[-1]): int_result_layer = IntImageResultLayer(", "= np.zeros((image_shape[0], image_shape[-2], image_shape[-1])) _mask[:,...] 
= mask int_result_layer = IntImageResultLayer( uid=-1, name=f\"{path.name}\", hint=\"imported", "self.data.shape if mask.shape == (image_shape[0], image_shape[-2], image_shape[-1]): int_result_layer = IntImageResultLayer( uid=-1, name=f\"{path.name}\", hint=\"imported", "db. ''' db_image = self.to_db_class() db_image.set_bg_false() self.refresh_from_db() def set_bg_true(self, image_layer: IntImageResultLayer): ''' Method", "[measurement.to_com_class() for measurement in self.measurements] kwargs[\"seriesIndex\"] = self.series_index kwargs[\"hasBgLayer\"] = self.has_bg_layer kwargs[\"bgLayerId\"] =", "import check_sess from pydantic import BaseModel, constr from pathlib import Path class DbImage(BaseModel):", "session is passed default session will be used (app.api.dependencies.get_db). ''' sess = check_sess(sess)", "original_filename = self.metadata[\"original_filename\"] self.metadata = self.metadata[\"images\"][self.series_index] self.metadata[\"original_filename\"] = original_filename # save zarr fsr.save_zarr(self.data,", "self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) elif mask.shape == (1, image_shape[-2], image_shape[-1]): _mask = np.zeros((image_shape[0], image_shape[-2], image_shape[-1]))", "experiments_group ids which use the image. image_result_layers : List[IntImageResultLayer] empty list by default.", "''' Expects the bg_uid to belong to a result layer of this image.", "set to False and None. Parameters: - layer_id(int): Id of result layer to", "n_channel, n_features), n_features == 2 (n_pixels, sum_pixels) Parameters: - layer_id(int): uid of the", "images bg_layer_id property to given value. Parameters: - layer_uid(int): uid of result layer", "app.api.cfg_classes.classifier_types. 
''' # Fetches dict in form {name: id} clf_dict = crud.read_classifier_dict_by_type(clf_type) if", "ground truth: {layer_id_list}\" int_result_layer = IntImageResultLayer( uid=-1, name=f\"ground_truth_estimation{suffix}\", hint=hint, image_id=self.uid, layer_type=\"labels\", data=ground_truth_estimation_array )", "Set[str] = set() data: Any metadata_omexml: Any has_bg_layer: bool = False bg_layer_id: Optional[int]", "objects unique identifier series_index : int index of image if multiple images were", "check_sess(sess) crud.update_image_bg_false(self.uid, sess) def set_bg_true(self, layer_uid: int, sess=None): ''' Sets images bg_layer_id property", "object experiment_ids: List[int] empty list by default. List of experiments_group ids which use", "type(None): warnings.warn(\"Image could not be read!\") return None else: image_shape = self.data.shape if", "in napari viewer. Parameters: - clf_type(str): Valid classifier type, for available types see", "_mask = np.zeros((image_shape[0], image_shape[-2], image_shape[-1])) _mask[:,...] = mask int_result_layer = IntImageResultLayer( uid=-1, name=f\"{path.name}\",", "hint in database. Uses default session if none is passed. update_channel_names(channel_names: List[str]) edits", "be unified and represented as foreground (==1) for calculation. Saves label layer to", "metadata.json and edits custom_channel_names Parameters: - channel_names(List[str]): List of strings to be saved", "will be unified and represented as foreground (==1) for calculation. Saves label layer", "refresh_from_db(self, sess=None): ''' Refreshes object image from db. Parameters: - sess(sqlalchemy.orm.Session): The database", "brief description of the object experiment_ids: List[int] empty list by default. List of", "ground truth estimation. Must belong to this image. - suffix(str): will be appended", "and passed db_image object to delete all associated files and db entries Parameters:", "object's attributes accordingly. 
Does not reload image data again ''' db_image = self.to_db_class()", "self.metadata_omexml.toprettyxml(indent=\"\\t\") fsr.save_metadata_xml(metadata_omexml, path_xml) # save thumbnail thumbnail = utils_import.generate_thumbnail(self.data) thumbnail_path = self.get_thumbnail_path() fsr.save_thumbnail(thumbnail,", "| None: Returns layer with corresponding id, returns None if id is not", "get_classifiers(clf_type: str) -> dict: Fetches all saved classifiers from db, filters for type", "db_image.create_in_db() self.uid = db_image.uid print(f\"Importing archived Mistos image with id {self.uid}\") fsr.save_zarr(self.data, db_image.path_image)", "as foreground (==1) for calculation. Saves label layer to image, database and file", "str) -> dict: Fetches all saved classifiers from db, filters for type and", "not already loaded if for_refresh == False: data = fsr.load_zarr(kwargs[\"path_image\"]) kwargs[\"data\"] = data", "range(n_channel): channel_data = self.select_channel(n) selection = np.where(bg_mask, channel_data, 0) _mean = selection.sum()/n_pixel mean_pixel.append(_mean)", "import xml from pathlib import Path from typing import Any, List, Optional, Set", "path_image: Optional[Path] image_result_layers: List[DbImageResultLayer] = [] measurements: List[DbResultMeasurement] = [] tags: Set[str] =", "x]) dims = dims/dims.max() else: dims = None print(\"Couldn't calculate scaling from metadata,", "clf_type: str): ''' Loads all classifiers of given type from database. Returns dictionary", "IntImageResultLayer) from app.api.classes.result_measurement import (DbResultMeasurement, IntResultMeasurement) from app.api.classes_com import ComImage from app.api.dependencies import", "y, x]) dims = dims/dims.max() elif n_z == 1: dims = np.array([y, x])", "trouble. metadata_omexml : Any original metadata xml data as read by bioformats import", "{\"UID_NAME\": UID}. Mainly for use in napari viewer. Parameters: - clf_type(str): Valid classifier", "if none is passed. 
''' uid: int series_index: int name: str hint: str", "as background layer, the attributes \"has_bg_layer\" and \"bg_layer_id\" are set to False and", "all labels > 0 to be background. Returns list of length n_channel with", "layer, the attributes \"has_bg_layer\" and \"bg_layer_id\" are set to False and None. Parameters:", "save thumbnail thumbnail = utils_import.generate_thumbnail(self.data) thumbnail_path = self.get_thumbnail_path() fsr.save_thumbnail(thumbnail, thumbnail_path) def get_thumbnail_path(self): '''", "in db. set_bg_true(layer_uid: int, sess = None) sets \"has_bg_layer\" property to True in", "mean_pixel = [] for n in range(n_channel): channel_data = self.select_channel(n) selection = np.where(bg_mask,", "associated files and db entries Parameters: - sess(sqlalchemy.orm.Session): The database session to be", "has the associated layers \\n{layers}\", UserWarning) return None def calculate_background(self): ''' Expects the", "self.has_bg_layer = updated_info.has_bg_layer self.bg_layer_id = updated_info.bg_layer_id def delete_result_layer(self, layer_id: int): ''' Method to", "xml.dom.minidom.parseString( metadata_string).toprettyxml(indent=\"\\t\") fsr.save_metadata_xml(metadata_string, path_xml) elif self.uid == -2: db_image = self.to_db_class() db_image.create_in_db() self.uid", "metadata = fsr.load_json(self.path_metadata) metadata_omexml_path = utils_paths.make_metadata_xml_path_from_json_path( self.path_metadata) kwargs[\"metadata_omexml\"] = fsr.load_metadata_xml(metadata_omexml_path) del kwargs[\"path_metadata\"] del", "[measurement.to_db_class() for measurement in self.result_measurements] db_image = DbImage( uid=self.uid, series_index=self.series_index, name=self.name, hint=self.hint, path_metadata=None,", "layer_id).to_int_class().data for layer_id in layer_id_list] ground_truth_estimation_array = utils_results.staple_gte( label_array_list) hint = f\"Following Label", "with return value from utils_paths.make_thumbnail_path Returns path as 
pathlib.Path ''' return utils_paths.fileserver.joinpath(utils_paths.make_thumbnail_path(self.uid)) def", "self.select_channel(n) selection = np.where(bg_mask, channel_data, 0) _mean = selection.sum()/n_pixel mean_pixel.append(_mean) return mean_pixel def", "associated layers \\n{layers}\", UserWarning) return None def calculate_background(self): ''' Expects the bg_uid to", "layer selected as background_layer. bg_layer_id : int, optional None if no bg_layer selected,", "db_result_measurements = [measurement.to_db_class() for measurement in self.result_measurements] db_image = DbImage( uid=self.uid, series_index=self.series_index, name=self.name,", "uid. If result layer is selected as background layer, the attributes \"has_bg_layer\" and", "only image's metadata will be saved to .json and loaded. hint : str,", "channel_names: List[str]): ''' This function expects a new channel names as list of", "to given value. Parameters: - layer_uid(int): uid of result layer to be used", "hint=\"imported mask\", image_id=self.uid, layer_type=\"labels\", data=_mask ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) else: warnings.warn( f\"Mask shape", "label_array_list) hint = f\"Following Label Layers were used to estimate the ground truth:", "property to True in db. sets bg_layer_id to given value. create_in_db(sess = None):", "self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) else: warnings.warn( f\"Mask shape {mask.shape} does not match image shape {image_shape}\")", "in self.image_result_layers] kwargs[\"measurements\"] = [measurement.to_com_class() for measurement in self.measurements] kwargs[\"seriesIndex\"] = self.series_index kwargs[\"hasBgLayer\"]", "Returns dimensions normalized scales in array with shape (z,y,x) or None. to_db_class() ->", ": Set[str]: empty set by default. Set of keywords to work with in", "List[IntImageResultLayer] empty list by default. 
List of all associated IntImageResultLayer objects. result_measurements :", "to initialize the object. Handles image as new image if \"uid\" == -1", "and bg_layer_id to None. set_bg_true(image_layer: app.api.classes_int.IntImageResultLayer): Method to set layer as background layer", "returns path to the thumbnail on fileserver. Gets gets fileserver path and joins", "n_channels. List holds the mean pixel values for each channel if background layer", "files and db entries Parameters: - sess(sqlalchemy.orm.Session): The database session to be used,", "str the objects name series_index : int if multiple images are imported via", "IntResultMeasurement objects tags : Set[str]: empty set by default. Set of keywords to", "{\"UID_NAME\": UID} refresh_from_db(): Fetches data of this image from db and updates the", "List of all associated DbImageResultLayer objects measurements: List[DbResultMeasurement] = [] emtpy list by", "save result as ResultMeasurement. Creates measurement object and initializes it (save to db", "Requests current information from db and updates the object's attributes accordingly. Does not", "object image from db. Parameters: - sess(sqlalchemy.orm.Session): The database session to be used,", "-> app.api.classes_db.DbImage: Returns object as DbImage object. set_bg_false(): Helper function to set has_bg_layer", "crud.delete_image(self, sess) class IntImage(BaseModel): ''' A class to handle calculations and other internal", "bg_layer selected, otherwise it holds the bg_layer_id Methods ------- on_init(): Initializes object. Object", "warnings.warn( f\"IntImage.select channel could not select layer with id {uid}.\\nThis image has the", "Deletes the layer from database, file storage and the image. If layer was", "False and bg_layer_id to None. Attribute is changed in db, then object attributes", "keywords to work with in the frontend data : Any ___TO BE DONE:", "default. List of all associated IntImageResultLayer objects. 
result_measurements : List[IntResultMeasurement] empty list by", "\".zarr\" folder. Automatically generated as image is saved to database. image_result_layers: List[DbImageResultLayer] =", "associated background layer. bg_layer_id: int, optional None if no associated background layer, otherwise", "copy of channel with shape (z,y,x). select_result_layer(uid: int) -> app.api.classes_internal.IntResultLayer | None: Returns", "else: suffix = \"_\" + suffix label_array_list = [crud.read_result_layer_by_uid( layer_id).to_int_class().data for layer_id in", "[result_layer.to_db_class() for result_layer in self.image_result_layers] db_result_measurements = [measurement.to_db_class() for measurement in self.result_measurements] db_image", "layer_id == self.bg_layer_id: self.set_bg_false() layer.delete() self.refresh_from_db() def estimate_ground_truth_layer(self, layer_id_list: List[int], suffix: str =", "IntImageResultLayer( uid=-1, name=f\"ground_truth_estimation{suffix}\", hint=hint, image_id=self.uid, layer_type=\"labels\", data=ground_truth_estimation_array ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) def add_layer_from_roi(self,", "and other internal operations with images. Attributes ---------- uid : int the objects", "Mistos image if \"uid\" == -2. Creates image in database which generates path", "default session will be used (app.api.dependencies.get_db) ''' sess = check_sess(sess) updated_db_image = crud.read_image_by_uid(", "if not already loaded if for_refresh == False: data = fsr.load_zarr(kwargs[\"path_image\"]) kwargs[\"data\"] =", "set layer as background layer select_channel(channel: int) -> np.array: Helper method expects channel", "= check_sess(sess) crud.update_image_bg_false(self.uid, sess) def set_bg_true(self, layer_uid: int, sess=None): ''' Sets images bg_layer_id", "- channel(int): index of channel to be selected. 
''' channel_data = copy.deepcopy(self.data[:, channel,", "string keywords to easily categorize objects in frontend. Methods ------- to_int_class()->app.api.classes_internal.IntImage: returns object", "full image if not already loaded if for_refresh == False: data = fsr.load_zarr(kwargs[\"path_image\"])", "for_refresh(bool = False): If True, image array is not reloaded from file storage.", "= image_layer.uid db_image = self.to_db_class() db_image.set_bg_true(layer_uid) self.refresh_from_db() def select_channel(self, channel: int): ''' Helper", "delete_result_layer(self, layer_id: int): ''' Method to delete a result layer by uid. If", "in db. Paths and id are generated and updated in object. Parameters: -", "Valid classifier type, for available types see app.api.cfg_classes.classifier_types. ''' # Fetches dict in", "measure_mask_in_image(layer_id: int) -> app.api.classes_int.IntMeasurementResult: Returns measurement object for given result layer and saves", "List[IntResultMeasurement] empty list by default. List of all associated IntResultMeasurement objects tags :", "update_hint(new_hint: str, sess = None): updates objects hint in database. Uses default session", "file name : str the objects name hint : str = \"\" empty", "self.select_result_layer(layer_id) if layer_id == self.bg_layer_id: self.set_bg_false() layer.delete() self.refresh_from_db() def estimate_ground_truth_layer(self, layer_id_list: List[int], suffix:", "Optional, Set import numpy as np from app import crud from app import", "class representation. ''' db_image_result_layers = [result_layer.to_db_class() for result_layer in self.image_result_layers] db_result_measurements = [measurement.to_db_class()", "not reload image data again ''' db_image = self.to_db_class() updated_info = db_image.refresh_from_db() self.name", "False and None. Parameters: - layer_id(int): Id of result layer to be deleted.", "int): ''' Method to measure mask and save result as ResultMeasurement. 
Creates measurement", "= IntImageResultLayer( uid=-1, name=f\"{path.name}\", hint=\"imported mask\", image_id=self.uid, layer_type=\"labels\", data=_mask ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid)", "= self.metadata_omexml.toprettyxml(indent=\"\\t\") fsr.save_metadata_xml(metadata_omexml, path_xml) # save thumbnail thumbnail = utils_import.generate_thumbnail(self.data) thumbnail_path = self.get_thumbnail_path()", "session will be used (app.api.dependencies.get_db) ''' sess = check_sess(sess) crud.update_image_hint(self.uid, new_hint, sess) def", "List, Optional, Set import numpy as np from app import crud from app", "str = \"\" has_bg_layer: bool = False bg_layer_id: Optional[int] path_metadata: Optional[Path] path_image: Optional[Path]", "layer_type=\"labels\", data=mask ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) def add_layer_from_mask(self, path: Path): mask = utils_import.read_mask(path)", "''' uid: int name: str series_index: int metadata: dict hint: Optional[str] = \"\"", "= [] measurements: List[DbResultMeasurement] = [] tags: Set[str] = [] def to_int_class(self, for_refresh=False):", "dict: Fetches all saved classifiers from db, filters for type and returns dictionary", "as channel names. ''' metadata = fsr.load_json(self.path_metadata) metadata[\"custom_channel_names\"] = channel_names fsr.save_metadata(metadata, self.path_metadata) def", "def delete_result_layer(self, layer_id: int): ''' Method to delete a result layer by uid.", "= self.select_result_layer(layer_id) measurement, measurement_summary = utils_results.calculate_measurement( image_array, layer.data) measurement_result = IntResultMeasurement( uid=-1, name=utils_paths.make_measurement_name(self.name,", "to estimate ground truth from multiple layers with by SimpleITK's STAPLE probabilities. 
For", ") measurement_result.on_init() self.refresh_from_db() return measurement_result def get_classifiers(self, clf_type: str): ''' Loads all classifiers", "is saved to database. path_image: pathlib.Path, optional path to the images array \".zarr\"", "dimensions and returns relative dimensions. Returns dimensions normalized scales in array with shape", "return mean_pixel def measure_mask_in_image(self, layer_id: int): ''' Method to measure mask and save", "of format {\"UID_NAME\": UID}. Mainly for use in napari viewer. Parameters: - clf_type(str):", "filters for type and returns dictionary of format {\"UID_NAME\": UID} refresh_from_db(): Fetches data", "classifers found\"] = None return clf_dict def refresh_from_db(self): ''' Requests current information from", "= Path(sql_image.path_metadata) def refresh_from_db(self, sess=None): ''' Refreshes object image from db. Parameters: -", "defaulting to None\") return dims def to_db_class(self): ''' Transforms internal class representation to", "calculation. Saves label layer to image, database and file storage. Parameters: - layer_id_list(List[int]):", "in range(n_channel): channel_data = self.select_channel(n) selection = np.where(bg_mask, channel_data, 0) _mean = selection.sum()/n_pixel", "read!\") return None else: image_shape = self.data.shape if mask.shape == (image_shape[0], image_shape[-2], image_shape[-1]):", "None) sets \"has_bg_layer\" property to True in db. sets bg_layer_id to given value.", "Method to estimate ground truth from multiple layers with by SimpleITK's STAPLE probabilities.", "self.to_db_class() db_image.create_in_db() self.uid = db_image.uid original_filename = self.metadata[\"original_filename\"] self.metadata = self.metadata[\"images\"][self.series_index] self.metadata[\"original_filename\"] =", "the thumbnail on fileserver. 
get_image_scaling(): Returns dimensions normalized scales in array with shape", "the objects unique identifier name : str the objects name series_index : int", "easily categorize objects in frontend. Methods ------- to_int_class()->app.api.classes_internal.IntImage: returns object as int_class. Loads", "Any has_bg_layer: bool = False bg_layer_id: Optional[int] def on_init(self): ''' Method to initialize", "storage get_thumbnail_path(): Helper function which returns path to the thumbnail on fileserver. get_image_scaling():", "- sess(sqlalchemy.orm.Session): The database session to be used, if no session is passed", "np.array([y, x]) dims = dims/dims.max() else: dims = None print(\"Couldn't calculate scaling from", "of channel with shape (z,y,x). Parameters: - channel(int): index of channel to be", "IntResultMeasurement) from app.api.classes_com import ComImage from app.api.dependencies import check_sess from pydantic import BaseModel,", "path: Path): mask = utils_import.read_mask(path) if type(mask) == type(None): warnings.warn(\"Image could not be", "emtpy list by default. List of all associated DbResultMeasurement objects tags: Set[str] =", "self.dict() kwargs[\"metadata\"] = utils_import.load_metadata_only( self.path_metadata) kwargs[\"imageResultLayers\"] = [image_result_layer.to_com_class() for image_result_layer in self.image_result_layers] kwargs[\"measurements\"]", "objects name hint : str = \"\" empty string by default. brief description", "to False and bg_layer_id to None. set_bg_true(image_layer: app.api.classes_int.IntImageResultLayer): Method to set layer as", "self.data.shape) int_result_layer = IntImageResultLayer( uid=-1, name=f\"{path.name}\", hint=\"imported maks\", image_id=self.uid, layer_type=\"labels\", data=mask ) int_result_layer.on_init()", "def get_image_scaling(self): ''' Reads pixel dimensions and returns relative dimensions. 
Returns dimensions normalized", "ComImage(**kwargs) def set_bg_false(self, sess=None): ''' Sets imagaes has_bg_layer property to False in database.", "saved classifiers from db, filters for type and returns dictionary of format {\"UID_NAME\":", "db and updates the object's attributes accordingly. Does not reload image data again", "crud.update_image_hint(self.uid, new_hint, sess) def update_channel_names(self, channel_names: List[str]): ''' This function expects a new", "def to_db_class(self): ''' Transforms internal class representation to db class representation. ''' db_image_result_layers", "(n_labels, n_channel, n_features), n_features == 2 (n_pixels, sum_pixels) Parameters: - layer_id(int): uid of", "name hint : str = \"\" empty string by default. brief description of", "uid=-1, name=f\"{path.name}\", hint=\"imported mask\", image_id=self.uid, layer_type=\"labels\", data=mask ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) elif mask.shape", "if no associated background layer, otherwise id of the background layer. path_metadata: pathlib.Path,", "Optional[Path] image_result_layers: List[DbImageResultLayer] = [] measurements: List[DbResultMeasurement] = [] tags: Set[str] = []", "is not reloaded from file storage. ''' kwargs = self.dict() # Only load", "= utils_results.staple_gte( label_array_list) hint = f\"Following Label Layers were used to estimate the", "frontend data : Any ___TO BE DONE: add custom field type___ array of", "to database. path_image: pathlib.Path, optional path to the images array \".zarr\" folder. Automatically", "np.zeros(( self.data.shape[0], self.data.shape[2], self.data.shape[3] )) if bg_mask.max() < 2: bg_mask = np.where(bg_mask >", "with shape (z,y,x) or None. to_db_class() -> app.api.classes_db.DbImage: Returns object as DbImage object.", "If layer was background_layer, corresponding attributes are reset. 
estimate_ground_truth_layer(layer_id_list: List[int], suffix: str): Fetches", "- layer_id_list(List[int]): List of layer ids to be used for ground truth estimation.", "and returns relative dimensions. Returns dimensions normalized scales in array with shape (z,y,x)", "= [measurement.to_com_class() for measurement in self.measurements] kwargs[\"seriesIndex\"] = self.series_index kwargs[\"hasBgLayer\"] = self.has_bg_layer kwargs[\"bgLayerId\"]", "to db and file storage) Returns IntResultMeasurement object: measurement.measurement has shape: (n_labels, n_channel,", "measurement in self.measurements] kwargs[\"seriesIndex\"] = self.series_index kwargs[\"hasBgLayer\"] = self.has_bg_layer kwargs[\"bgLayerId\"] = self.bg_layer_id kwargs[\"tags\"]", "warnings import xml from pathlib import Path from typing import Any, List, Optional,", "reload image data again ''' db_image = self.to_db_class() updated_info = db_image.refresh_from_db() self.name =", "it (save to db and file storage) Returns IntResultMeasurement object: measurement.measurement has shape:", "and initializes it (save to db and file storage) Returns IntResultMeasurement object: measurement.measurement", "value. create_in_db(sess = None): creates object in database, updates objects path and uid", "scales in array with shape (z,y,x) or None. to_db_class() -> app.api.classes_db.DbImage: Returns object", "zarr fsr.save_zarr(self.data, db_image.path_image) # save metadata dict fsr.save_json(self.metadata, db_image.path_metadata) # save metadata xml", "(app.api.dependencies.get_db) ''' sess = check_sess(sess) crud.update_image_hint(self.uid, new_hint, sess) def update_channel_names(self, channel_names: List[str]): '''", "save zarr fsr.save_zarr(self.data, db_image.path_image) # save metadata dict fsr.save_json(self.metadata, db_image.path_metadata) # save metadata", "DbImage Fetches image from database and returns DbImage object. 
update_hint(new_hint: str, sess =", "Reads pixel dimensions and returns relative dimensions. Returns dimensions normalized scales in array", "(z,y,x) or None. to_db_class() -> app.api.classes_db.DbImage: Returns object as DbImage object. set_bg_false(): Helper", "db_image.path_image) # save metadata dict fsr.save_json(self.metadata, db_image.path_metadata) # save metadata xml path_xml =", "= dims/dims.max() elif n_z == 1: dims = np.array([y, x]) dims = dims/dims.max()", "of this image. Result layer will be turned to binary, assuming all labels", "mask = utils_import.read_mask(path) if type(mask) == type(None): warnings.warn(\"Image could not be read!\") return", "names. ''' metadata = fsr.load_json(self.path_metadata) metadata[\"custom_channel_names\"] = channel_names fsr.save_metadata(metadata, self.path_metadata) def delete_from_system(self, sess=None):", "self.uid == -2: db_image = self.to_db_class() db_image.create_in_db() self.uid = db_image.uid print(f\"Importing archived Mistos", "object: measurement.measurement has shape: (n_labels, n_channel, n_features), n_features == 2 (n_pixels, sum_pixels) Parameters:", "xml from pathlib import Path from typing import Any, List, Optional, Set import", "db_image.path_image) fsr.save_json(self.metadata, db_image.path_metadata) path_xml = utils_paths.make_metadata_xml_path_from_json_path( db_image.path_metadata) # metadata_string = self.metadata_omexml.to_xml(encoding=\"utf-8\") metadata_omexml =", "f\"IntImage.select channel could not select layer with id {uid}.\\nThis image has the associated", "uid: int): layers = [_ for _ in self.image_result_layers if _.uid == uid]", "to False and None. Parameters: - layer_id(int): Id of result layer to be", "Returns object as DbImage object. set_bg_false(): Helper function to set has_bg_layer to False", "none is passed. 
update_channel_names(channel_names: List[str]) edits \"custom_channel_names\" attribute of image in it's metadata.json", "series), the index of the image is stored here metadata : dict reduced", "mask\", image_id=self.uid, layer_type=\"labels\", data=_mask ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) else: warnings.warn( f\"Mask shape {mask.shape}", "strings. opens metadata.json and edits custom_channel_names Parameters: - channel_names(List[str]): List of strings to", "= sql_image.id self.path_image = Path(sql_image.path_image) self.path_metadata = Path(sql_image.path_metadata) def refresh_from_db(self, sess=None): ''' Refreshes", "objects tags : Set[str]: empty set by default. Set of keywords to work", "metadata dict fsr.save_json(self.metadata, db_image.path_metadata) # save metadata xml path_xml = utils_paths.make_metadata_xml_path_from_json_path( db_image.path_metadata) metadata_string", "representation to db class representation. ''' db_image_result_layers = [result_layer.to_db_class() for result_layer in self.image_result_layers]", "bg_mask = np.where(bg_mask > 0, 1, 0) n_pixel = bg_mask.sum() n_channel = self.data.shape[1]", "clf_dict == {}: clf_dict[\"No classifers found\"] = None return clf_dict def refresh_from_db(self): '''", "again ''' db_image = self.to_db_class() updated_info = db_image.refresh_from_db() self.name = updated_info.name self.hint =", "object. set_bg_false(): Helper function to set has_bg_layer to False and bg_layer_id to None.", "and calls crud.update_image_hint to update the image hint. Parameters: - new_hint(str): string to", "session if none is passed. update_channel_names(channel_names: List[str]) edits \"custom_channel_names\" attribute of image in", "constr from pathlib import Path class DbImage(BaseModel): ''' A class to handle database", "was background_layer, corresponding attributes are reset. 
estimate_ground_truth_layer(layer_id_list: List[int], suffix: str): Fetches given layers", "background layer. bg_layer_id: int, optional None if no associated background layer, otherwise id", "db_image = self.to_db_class() db_image.set_bg_true(layer_uid) self.refresh_from_db() def select_channel(self, channel: int): ''' Helper method expects", "SimpleITK's STAPLE probabilities. For ground truth estimation layer will be binarized, all labels", "self.data.shape[0], self.data.shape[2], self.data.shape[3] )) if bg_mask.max() < 2: bg_mask = np.where(bg_mask > 0,", "image_shape[-2], image_shape[-1]): int_result_layer = IntImageResultLayer( uid=-1, name=f\"{path.name}\", hint=\"imported mask\", image_id=self.uid, layer_type=\"labels\", data=mask )", "images were imported in a single file name : str the objects name", "List[str]) edits \"custom_channel_names\" attribute of image in it's metadata.json delete_from_system(sess = None): deletes", "db. Parameters: - sess(sqlalchemy.orm.Session): The database session to be used, if no session", "reloaded from file storage. ''' kwargs = self.dict() # Only load the full", "for_refresh=False): ''' Returns object as int class. Parameters: - for_refresh(bool = False): If", "as IntResultLayer. add_layer_from_roi(path) add_layer_from_mask(path) ''' uid: int name: str series_index: int metadata: dict", "estimate ground truth from multiple layers with by SimpleITK's STAPLE probabilities. For ground", "objects in frontend. Methods ------- to_int_class()->app.api.classes_internal.IntImage: returns object as int_class. Loads layer array", "check_sess(sess) crud.update_image_hint(self.uid, new_hint, sess) def update_channel_names(self, channel_names: List[str]): ''' This function expects a", "object to delete all associated files and db entries Parameters: - sess(sqlalchemy.orm.Session): The", "returns object as int_class. 
Loads layer array from file path in the process.", "= mask int_result_layer = IntImageResultLayer( uid=-1, name=f\"{path.name}\", hint=\"imported mask\", image_id=self.uid, layer_type=\"labels\", data=_mask )", "in self.measurements] kwargs[\"seriesIndex\"] = self.series_index kwargs[\"hasBgLayer\"] = self.has_bg_layer kwargs[\"bgLayerId\"] = self.bg_layer_id kwargs[\"tags\"] =", "imported in a single file name : str the objects name hint :", "image_result_layers: List[IntImageResultLayer] = [] result_measurements: List[IntResultMeasurement] = [] tags: Set[str] = set() data:", "= self.select_channel(n) selection = np.where(bg_mask, channel_data, 0) _mean = selection.sum()/n_pixel mean_pixel.append(_mean) return mean_pixel", "used (app.api.dependencies.get_db) ''' sess = check_sess(sess) crud.update_image_hint(self.uid, new_hint, sess) def update_channel_names(self, channel_names: List[str]):", "app.api import utils_import, utils_paths, utils_results from app.api.classes.image_result_layer import (DbImageResultLayer, IntImageResultLayer) from app.api.classes.result_measurement import", "def set_bg_true(self, layer_uid: int, sess=None): ''' Sets images bg_layer_id property to given value.", "- for_refresh(bool = False): If True, image array is not reloaded from file", "= np.zeros(( self.data.shape[0], self.data.shape[2], self.data.shape[3] )) if bg_mask.max() < 2: bg_mask = np.where(bg_mask", "layer is defined. measure_mask_in_image(layer_id: int) -> app.api.classes_int.IntMeasurementResult: Returns measurement object for given result", "object in database, updates objects path and uid attributes accordingly. Uses default session", "will be used (app.api.dependencies.get_db). 
''' sess = check_sess(sess) crud.update_image_bg_false(self.uid, sess) def set_bg_true(self, layer_uid:", "Result layer will be turned to binary, assuming all labels > 0 to", "layer = self.select_result_layer(layer_id) measurement, measurement_summary = utils_results.calculate_measurement( image_array, layer.data) measurement_result = IntResultMeasurement( uid=-1,", "str, optional empty string by default. brief description of the object experiment_ids: List[int]", "channel, ...])[ :, np.newaxis, ...] return channel_data def select_result_layer(self, uid: int): layers =", "truth estimation. Must belong to this image. - suffix(str): will be appended to", "database, updates objects path and uid attributes accordingly. Uses default session if none", "if id is not found in self.image_result_layers calculate_background() -> List: Returns list of", "List[IntResultMeasurement] = [] tags: Set[str] = set() data: Any metadata_omexml: Any has_bg_layer: bool", "thumbnail = utils_import.generate_thumbnail(self.data) thumbnail_path = self.get_thumbnail_path() fsr.save_thumbnail(thumbnail, thumbnail_path) def get_thumbnail_path(self): ''' Helper function", "= np.array([y, x]) dims = dims/dims.max() else: dims = None print(\"Couldn't calculate scaling", "background layer, the attributes \"has_bg_layer\" and \"bg_layer_id\" are set to False and None.", "if self.uid == -1: db_image = self.to_db_class() db_image.create_in_db() self.uid = db_image.uid original_filename =", "utils_paths.fileserver.joinpath(utils_paths.make_thumbnail_path(self.uid)) def get_image_scaling(self): ''' Reads pixel dimensions and returns relative dimensions. 
Returns dimensions", "path_image=None, has_bg_layer=self.has_bg_layer, bg_layer_id=self.bg_layer_id, experiment_ids=self.experiment_ids, image_result_layers=db_image_result_layers, result_measurements=db_result_measurements, tags=self.tags ) return db_image def set_bg_false(self): '''", "custom_channel_names Parameters: - channel_names(List[str]): List of strings to be saved as channel names.", "list by default. List of all associated DbImageResultLayer objects measurements: List[DbResultMeasurement] = []", "an associated background layer. bg_layer_id: int, optional None if no associated background layer,", "''' Refreshes object image from db. Parameters: - sess(sqlalchemy.orm.Session): The database session to", "= data else: kwargs[\"data\"] = None metadata = fsr.load_json(self.path_metadata) metadata_omexml_path = utils_paths.make_metadata_xml_path_from_json_path( self.path_metadata)", "image_layer.uid db_image = self.to_db_class() db_image.set_bg_true(layer_uid) self.refresh_from_db() def select_channel(self, channel: int): ''' Helper method", "return layers[0] else: warnings.warn( f\"IntImage.select channel could not select layer with id {uid}.\\nThis", "to_com_class(self): ''' Returns obect as com class. ''' kwargs = self.dict() kwargs[\"metadata\"] =", "= self.metadata[\"original_filename\"] self.metadata = self.metadata[\"images\"][self.series_index] self.metadata[\"original_filename\"] = original_filename # save zarr fsr.save_zarr(self.data, db_image.path_image)", "''' Method to delete a result layer by uid. If result layer is", "is passed default session will be used (app.api.dependencies.get_db). ''' sess = check_sess(sess) crud.update_image_bg_false(self.uid,", "storage. get_classifiers(clf_type: str) -> dict: Fetches all saved classifiers from db, filters for", "(n_pixels, sum_pixels) Parameters: - layer_id(int): uid of the layer to be measured '''", "object as int class. Parameters: - for_refresh(bool = False): If True, image array", "class. 
Parameters: - for_refresh(bool = False): If True, image array is not reloaded", "and represented as foreground (==1) for calculation. Saves label layer to image, database", "object. Handles image as new image if \"uid\" == -1 and as imported", "objects measurements: List[DbResultMeasurement] = [] emtpy list by default. List of all associated", "import BaseModel, constr from pathlib import Path class DbImage(BaseModel): ''' A class to", "self.result_measurements = updated_info.result_measurements self.tags = updated_info.tags self.has_bg_layer = updated_info.has_bg_layer self.bg_layer_id = updated_info.bg_layer_id def", "which returns path to the thumbnail on fileserver. get_image_scaling(): Returns dimensions normalized scales", "binary, assuming all labels > 0 to be background. Returns list of length", "object. Parameters: - sess(sqlalchemy.orm.Session): The database session to be used, if no session", "''' Helper function to set has_bg_layer to False and bg_layer_id to None. Attribute", "check_sess(sess) updated_db_image = crud.read_image_by_uid( self.uid, sess, for_refresh=True) return updated_db_image def update_hint(self, new_hint: str,", "= Path(sql_image.path_image) self.path_metadata = Path(sql_image.path_metadata) def refresh_from_db(self, sess=None): ''' Refreshes object image from", "fsr.save_metadata(metadata, self.path_metadata) def delete_from_system(self, sess=None): ''' calls crud.delete_image and passed db_image object to", "experiment_ids=self.experiment_ids, image_result_layers=db_image_result_layers, result_measurements=db_result_measurements, tags=self.tags ) return db_image def set_bg_false(self): ''' Helper function to", "updated_info = db_image.refresh_from_db() self.name = updated_info.name self.hint = updated_info.hint self.experiment_ids = updated_info.experiment_ids self.image_result_layers", "set_bg_true(self, layer_uid: int, sess=None): ''' Sets images bg_layer_id property to given value. 
Parameters:", "layer to image, database and file storage. Parameters: - layer_id_list(List[int]): List of layer", "be used (app.api.dependencies.get_db) ''' sess = check_sess(sess) crud.update_image_hint(self.uid, new_hint, sess) def update_channel_names(self, channel_names:", "suffix = \"\" else: suffix = \"_\" + suffix label_array_list = [crud.read_result_layer_by_uid( layer_id).to_int_class().data", "def select_channel(self, channel: int): ''' Helper method expects channel index. Returns deep copy", "default. List of all associated DbImageResultLayer objects measurements: List[DbResultMeasurement] = [] emtpy list", "self.has_bg_layer kwargs[\"bgLayerId\"] = self.bg_layer_id kwargs[\"tags\"] = list(self.tags) return ComImage(**kwargs) def set_bg_false(self, sess=None): '''", "all labels > 0 will be unified and represented as foreground (==1) for", "database. Returns dictionary of format {\"UID_NAME\": UID}. Mainly for use in napari viewer.", ")) if bg_mask.max() < 2: bg_mask = np.where(bg_mask > 0, 1, 0) n_pixel", "within mistos. As created by app.api.utils_import.acquire_metadata_dict (creates metadata dict for whole series) Series", "IntResultLayer. add_layer_from_roi(path) add_layer_from_mask(path) ''' uid: int name: str series_index: int metadata: dict hint:", "None def calculate_background(self): ''' Expects the bg_uid to belong to a result layer", "-1: db_image = self.to_db_class() db_image.create_in_db() self.uid = db_image.uid original_filename = self.metadata[\"original_filename\"] self.metadata =", "sess) class IntImage(BaseModel): ''' A class to handle calculations and other internal operations", "from .zarr files, most numpy operations work, some may cause trouble. 
metadata_omexml :", "self.metadata[\"original_filename\"] = original_filename # save zarr fsr.save_zarr(self.data, db_image.path_image) # save metadata dict fsr.save_json(self.metadata,", "= self.to_db_class() db_image.set_bg_false() self.refresh_from_db() def set_bg_true(self, image_layer: IntImageResultLayer): ''' Method to set layer", "Saves label layer to image, database and file storage. Parameters: - layer_id_list(List[int]): List", "db, filters for type and returns dictionary of format {\"UID_NAME\": UID} refresh_from_db(): Fetches", "the object. Handles image as new image if \"uid\" == -1 and as", "sess) def update_channel_names(self, channel_names: List[str]): ''' This function expects a new channel names", "measurement object and initializes it (save to db and file storage) Returns IntResultMeasurement", "Layer to be selected as background layer. ''' layer_uid = image_layer.uid db_image =", "operations work, some may cause trouble. metadata_omexml : Any original metadata xml data", "= crud.read_image_by_uid( self.uid, sess, for_refresh=True) return updated_db_image def update_hint(self, new_hint: str, sess=None): '''", "-> DbImage Fetches image from database and returns DbImage object. update_hint(new_hint: str, sess", "Is loaded from .zarr files, most numpy operations work, some may cause trouble.", "Layers were used to estimate the ground truth: {layer_id_list}\" int_result_layer = IntImageResultLayer( uid=-1,", "as com_class. set_bg_false(sess = None) sets \"has_bg_layer\" property to False in db. set_bg_true(layer_uid:", "self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) def add_layer_from_roi(self, path: Path): mask = utils_import.read_roi(path, self.data.shape) int_result_layer = IntImageResultLayer(", "channel to be selected. ''' channel_data = copy.deepcopy(self.data[:, channel, ...])[ :, np.newaxis, ...]", "the object's attributes accordingly. Does not reload image data again ''' db_image =", "by default. 
List of all associated IntResultMeasurement objects tags : Set[str]: empty set", "of the layer to be measured ''' image_array = self.data layer = self.select_result_layer(layer_id)", "str = None): ''' Method to estimate ground truth from multiple layers with", "= IntImageResultLayer( uid=-1, name=f\"{path.name}\", hint=\"imported mask\", image_id=self.uid, layer_type=\"labels\", data=mask ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid)", "dims def to_db_class(self): ''' Transforms internal class representation to db class representation. '''", "experiment_ids: List[int] = [] image_result_layers: List[IntImageResultLayer] = [] result_measurements: List[IntResultMeasurement] = [] tags:", "hint=hint, image_id=self.uid, layer_type=\"labels\", data=ground_truth_estimation_array ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) def add_layer_from_roi(self, path: Path): mask", "loaded from .zarr files, most numpy operations work, some may cause trouble. metadata_omexml", "= bg_layer.data else: bg_mask = np.zeros(( self.data.shape[0], self.data.shape[2], self.data.shape[3] )) if bg_mask.max() <", "is passed. ''' uid: int series_index: int name: str hint: str = \"\"", "be used (app.api.dependencies.get_db) ''' sess = check_sess(sess) updated_db_image = crud.read_image_by_uid( self.uid, sess, for_refresh=True)", "------- on_init(): Initializes object. Object is saved in database and file storage get_thumbnail_path():", "Must belong to this image. - suffix(str): will be appended to layer name.", "create_in_db(self, sess=None): ''' Creates object in db. Paths and id are generated and", "the frontend data : Any ___TO BE DONE: add custom field type___ array", "thumbnail on fileserver. 
get_image_scaling(): Returns dimensions normalized scales in array with shape (z,y,x)", "= check_sess(sess) updated_db_image = crud.read_image_by_uid( self.uid, sess, for_refresh=True) return updated_db_image def update_hint(self, new_hint:", "= check_sess(sess) crud.update_image_bg_true(self.uid, layer_uid, sess) def create_in_db(self, sess=None): ''' Creates object in db.", "channel_data def select_result_layer(self, uid: int): layers = [_ for _ in self.image_result_layers if", "\"bg_layer_id\" are set to False and None. Parameters: - layer_id(int): Id of result", "new image if \"uid\" == -1 and as imported Mistos image if \"uid\"", "length n_channels. List holds the mean pixel values for each channel if background", "# save thumbnail thumbnail = utils_import.generate_thumbnail(self.data) thumbnail_path = self.get_thumbnail_path() fsr.save_thumbnail(thumbnail, thumbnail_path) def get_thumbnail_path(self):", "int_class. Loads layer array from file path in the process. to_com_class()->app.api.classes_com.ComImage: returns object", "will be used (app.api.dependencies.get_db) ''' sess = check_sess(sess) crud.update_image_hint(self.uid, new_hint, sess) def update_channel_names(self,", "of the object experiment_ids: List[int] empty list by default. List of experiments_group ids", "empty string by default. brief description of the object has_bg_layer: bool = False", "result layer to be deleted. 
''' layer = self.select_result_layer(layer_id) if layer_id == self.bg_layer_id:", "used to estimate the ground truth: {layer_id_list}\" int_result_layer = IntImageResultLayer( uid=-1, name=f\"ground_truth_estimation{suffix}\", hint=hint,", "image_array, layer.data) measurement_result = IntResultMeasurement( uid=-1, name=utils_paths.make_measurement_name(self.name, layer.name), hint=\"\", image_id=self.uid, result_layer_id=layer.uid, measurement=measurement, measurement_summary=measurement_summary", "to_db_class(self): ''' Transforms internal class representation to db class representation. ''' db_image_result_layers =", "= self.to_db_class() db_image.create_in_db() self.uid = db_image.uid print(f\"Importing archived Mistos image with id {self.uid}\")", "tags: Set[str] = [] set of string keywords to easily categorize objects in", "channel names. ''' metadata = fsr.load_json(self.path_metadata) metadata[\"custom_channel_names\"] = channel_names fsr.save_metadata(metadata, self.path_metadata) def delete_from_system(self,", "array \".zarr\" folder. Automatically generated as image is saved to database. image_result_layers: List[DbImageResultLayer]", "the bg_layer_id Methods ------- on_init(): Initializes object. Object is saved in database and", "for ground truth estimation. Must belong to this image. - suffix(str): will be", "import utils_import, utils_paths, utils_results from app.api.classes.image_result_layer import (DbImageResultLayer, IntImageResultLayer) from app.api.classes.result_measurement import (DbResultMeasurement,", "str series_index: int metadata: dict hint: Optional[str] = \"\" experiment_ids: List[int] = []", "format {\"UID_NAME\": UID} refresh_from_db(): Fetches data of this image from db and updates", "data: Any metadata_omexml: Any has_bg_layer: bool = False bg_layer_id: Optional[int] def on_init(self): '''", "all associated IntImageResultLayer objects. result_measurements : List[IntResultMeasurement] empty list by default. 
List of", "\"\" else: suffix = \"_\" + suffix label_array_list = [crud.read_result_layer_by_uid( layer_id).to_int_class().data for layer_id", "db_image = DbImage( uid=self.uid, series_index=self.series_index, name=self.name, hint=self.hint, path_metadata=None, path_image=None, has_bg_layer=self.has_bg_layer, bg_layer_id=self.bg_layer_id, experiment_ids=self.experiment_ids, image_result_layers=db_image_result_layers,", "\".json\". Automatically generated as image is saved to database. path_image: pathlib.Path, optional path", "self.hint = updated_info.hint self.experiment_ids = updated_info.experiment_ids self.image_result_layers = updated_info.image_result_layers self.result_measurements = updated_info.result_measurements self.tags", "= \"\" else: suffix = \"_\" + suffix label_array_list = [crud.read_result_layer_by_uid( layer_id).to_int_class().data for", "new hint as string and calls crud.update_image_hint to update the image hint. Parameters:", "measurements: List[DbResultMeasurement] = [] tags: Set[str] = [] def to_int_class(self, for_refresh=False): ''' Returns", "truth. Resulting layer will be initialized as IntResultLayer. add_layer_from_roi(path) add_layer_from_mask(path) ''' uid: int", "n_channel = self.data.shape[1] mean_pixel = [] for n in range(n_channel): channel_data = self.select_channel(n)", "== (image_shape[0], image_shape[-2], image_shape[-1]): int_result_layer = IntImageResultLayer( uid=-1, name=f\"{path.name}\", hint=\"imported mask\", image_id=self.uid, layer_type=\"labels\",", "given layers and uses SimpleITKs STAPLE algorithm to estimate ground truth. Resulting layer", "self.metadata['pixel_size_z'] if n_z > 1: dims = np.array([z, y, x]) dims = dims/dims.max()", "attributes are reloaded from db. ''' db_image = self.to_db_class() db_image.set_bg_false() self.refresh_from_db() def set_bg_true(self,", "pixels. 
''' if self.has_bg_layer: bg_uid = self.bg_layer_id bg_layer = self.select_result_layer(bg_uid) bg_mask = bg_layer.data", "sess(sqlalchemy.orm.Session): The database session to be used, if no session is passed default", "with shape (z,y,x) or None if no scaling information was provided in metadata.", "self.select_result_layer(bg_uid) bg_mask = bg_layer.data else: bg_mask = np.zeros(( self.data.shape[0], self.data.shape[2], self.data.shape[3] )) if", "return measurement_result def get_classifiers(self, clf_type: str): ''' Loads all classifiers of given type", "path and uid attributes accordingly. Uses default session if none is passed. refresh_from_db()", "as int class. Parameters: - for_refresh(bool = False): If True, image array is", "on_init(self): ''' Method to initialize the object. Handles image as new image if", "belong to this image. - suffix(str): will be appended to layer name. '''", "as int_class. Loads layer array from file path in the process. to_com_class()->app.api.classes_com.ComImage: returns", "= None): updates objects hint in database. Uses default session if none is", "if no session is passed default session will be used (app.api.dependencies.get_db). ''' sess", "measurement_result = IntResultMeasurement( uid=-1, name=utils_paths.make_measurement_name(self.name, layer.name), hint=\"\", image_id=self.uid, result_layer_id=layer.uid, measurement=measurement, measurement_summary=measurement_summary ) measurement_result.on_init()", "classifiers of given type from database. Returns dictionary of format {\"UID_NAME\": UID}. Mainly", "deep copy of channel with shape (z,y,x). 
select_result_layer(uid: int) -> app.api.classes_internal.IntResultLayer | None:", "None else: image_shape = self.data.shape if mask.shape == (image_shape[0], image_shape[-2], image_shape[-1]): int_result_layer =", "layer with corresponding id, returns None if id is not found in self.image_result_layers", "int name: str hint: str = \"\" has_bg_layer: bool = False bg_layer_id: Optional[int]", "database. image_result_layers: List[DbImageResultLayer] = [] emtpy list by default. List of all associated", "kwargs[\"data\"] = None metadata = fsr.load_json(self.path_metadata) metadata_omexml_path = utils_paths.make_metadata_xml_path_from_json_path( self.path_metadata) kwargs[\"metadata_omexml\"] = fsr.load_metadata_xml(metadata_omexml_path)", "== self.bg_layer_id: self.set_bg_false() layer.delete() self.refresh_from_db() def estimate_ground_truth_layer(self, layer_id_list: List[int], suffix: str = None):", "from database, file storage and the image. If layer was background_layer, corresponding attributes", ": int if multiple images are imported via one file (image series), the", "\"_\" + suffix label_array_list = [crud.read_result_layer_by_uid( layer_id).to_int_class().data for layer_id in layer_id_list] ground_truth_estimation_array =", "session will be used (app.api.dependencies.get_db) ''' sess = check_sess(sess) crud.delete_image(self, sess) class IntImage(BaseModel):", "this image. Result layer will be turned to binary, assuming all labels >", "Sets imagaes has_bg_layer property to False in database. Parameters: - sess(sqlalchemy.orm.Session): The database", "set has_bg_layer to False and bg_layer_id to None. set_bg_true(image_layer: app.api.classes_int.IntImageResultLayer): Method to set", "import Path from typing import Any, List, Optional, Set import numpy as np", "path_metadata: pathlib.Path, optional path to the images metadata \".json\". 
Automatically generated as image", "metadata_omexml : Any original metadata xml data as read by bioformats import when", "channel_names fsr.save_metadata(metadata, self.path_metadata) def delete_from_system(self, sess=None): ''' calls crud.delete_image and passed db_image object", "{self.uid}\") fsr.save_zarr(self.data, db_image.path_image) fsr.save_json(self.metadata, db_image.path_metadata) path_xml = utils_paths.make_metadata_xml_path_from_json_path( db_image.path_metadata) # metadata_string = self.metadata_omexml.to_xml(encoding=\"utf-8\")", "associated IntImageResultLayer objects. result_measurements : List[IntResultMeasurement] empty list by default. List of all", "series_index: int name: str hint: str = \"\" has_bg_layer: bool = False bg_layer_id:", "the image is stored here metadata : dict reduced metadata for easy use", "crud.update_image_hint to update the image hint. Parameters: - new_hint(str): string to be saved.", "image_result_layer in self.image_result_layers] kwargs[\"measurements\"] = [measurement.to_com_class() for measurement in self.measurements] kwargs[\"seriesIndex\"] = self.series_index", "(z,y,x) or None if no scaling information was provided in metadata. ''' x", "None): updates objects hint in database. Uses default session if none is passed.", "measurement_result.on_init() self.refresh_from_db() return measurement_result def get_classifiers(self, clf_type: str): ''' Loads all classifiers of", "''' db_image = self.to_db_class() db_image.set_bg_false() self.refresh_from_db() def set_bg_true(self, image_layer: IntImageResultLayer): ''' Method to", "= self.has_bg_layer kwargs[\"bgLayerId\"] = self.bg_layer_id kwargs[\"tags\"] = list(self.tags) return ComImage(**kwargs) def set_bg_false(self, sess=None):", "be initialized as IntResultLayer. 
add_layer_from_roi(path) add_layer_from_mask(path) ''' uid: int name: str series_index: int", "n_pixel = bg_mask.sum() n_channel = self.data.shape[1] mean_pixel = [] for n in range(n_channel):", "has_bg_layer : bool False by default. Indicates if image as layer selected as", "as fsr from app.api import utils_import, utils_paths, utils_results from app.api.classes.image_result_layer import (DbImageResultLayer, IntImageResultLayer)", "BaseModel, constr from pathlib import Path class DbImage(BaseModel): ''' A class to handle", "return IntImage(**kwargs) def to_com_class(self): ''' Returns obect as com class. ''' kwargs =", "the bg_uid to belong to a result layer of this image. Result layer", "self.metadata = self.metadata[\"images\"][self.series_index] self.metadata[\"original_filename\"] = original_filename # save zarr fsr.save_zarr(self.data, db_image.path_image) # save", "and None. Parameters: - layer_id(int): Id of result layer to be deleted. '''", "object as DbImage object. set_bg_false(): Helper function to set has_bg_layer to False and", "and updated in object. Parameters: - sess(sqlalchemy.orm.Session): The database session to be used,", "f\"Following Label Layers were used to estimate the ground truth: {layer_id_list}\" int_result_layer =", "channel_data = copy.deepcopy(self.data[:, channel, ...])[ :, np.newaxis, ...] return channel_data def select_result_layer(self, uid:", "field type___ array of shape (z,c,y,x) in which the image is stored. Is", "of keywords to work with in the frontend data : Any ___TO BE", "''' channel_data = copy.deepcopy(self.data[:, channel, ...])[ :, np.newaxis, ...] 
return channel_data def select_result_layer(self,", "name=self.name, hint=self.hint, path_metadata=None, path_image=None, has_bg_layer=self.has_bg_layer, bg_layer_id=self.bg_layer_id, experiment_ids=self.experiment_ids, image_result_layers=db_image_result_layers, result_measurements=db_result_measurements, tags=self.tags ) return db_image", "sets bg_layer_id to given value. create_in_db(sess = None): creates object in database, updates", "bg_layer_id to given value. create_in_db(sess = None): creates object in database, updates objects", "Attribute is changed in db, then object attributes are reloaded from db. '''", "\"\" has_bg_layer: bool = False bg_layer_id: Optional[int] path_metadata: Optional[Path] path_image: Optional[Path] image_result_layers: List[DbImageResultLayer]", "are set to False and None. Parameters: - layer_id(int): Id of result layer", "to work with in the frontend data : Any ___TO BE DONE: add", "if no bg_layer selected, otherwise it holds the bg_layer_id Methods ------- on_init(): Initializes", "Returns object as int class. Parameters: - for_refresh(bool = False): If True, image", "empty string by default. brief description of the object experiment_ids: List[int] empty list", "''' layer = self.select_result_layer(layer_id) if layer_id == self.bg_layer_id: self.set_bg_false() layer.delete() self.refresh_from_db() def estimate_ground_truth_layer(self,", "fileserver. get_image_scaling(): Returns dimensions normalized scales in array with shape (z,y,x) or None.", "initialized as IntResultLayer. add_layer_from_roi(path) add_layer_from_mask(path) ''' uid: int name: str series_index: int metadata:", ".json and loaded. hint : str, optional empty string by default. brief description", "''' Method to measure mask and save result as ResultMeasurement. Creates measurement object", "empty list by default. List of experiments_group ids which use the image. image_result_layers", "bioformats import when image was imported has_bg_layer : bool False by default. 
Indicates", "self.metadata_omexml.to_xml(encoding=\"utf-8\") metadata_omexml = self.metadata_omexml.toprettyxml(indent=\"\\t\") fsr.save_metadata_xml(metadata_omexml, path_xml) # save thumbnail thumbnail = utils_import.generate_thumbnail(self.data) thumbnail_path", "sess = check_sess(sess) crud.update_image_bg_false(self.uid, sess) def set_bg_true(self, layer_uid: int, sess=None): ''' Sets images", "object. Object is saved in database and file storage get_thumbnail_path(): Helper function which", "method expects channel index. Returns deep copy of channel with shape (z,y,x). select_result_layer(uid:", "one file (image series), the index of the image is stored here metadata", "Parameters: - layer_id_list(List[int]): List of layer ids to be used for ground truth", "[] image_result_layers: List[IntImageResultLayer] = [] result_measurements: List[IntResultMeasurement] = [] tags: Set[str] = set()", "use within mistos. As created by app.api.utils_import.acquire_metadata_dict (creates metadata dict for whole series)", "metadata_string = xml.dom.minidom.parseString( metadata_string).toprettyxml(indent=\"\\t\") fsr.save_metadata_xml(metadata_string, path_xml) elif self.uid == -2: db_image = self.to_db_class()", "sess = check_sess(sess) crud.update_image_hint(self.uid, new_hint, sess) def update_channel_names(self, channel_names: List[str]): ''' This function", "[_ for _ in self.image_result_layers if _.uid == uid] if len(layers) > 0:", "passed default session will be used (app.api.dependencies.get_db) ''' sess = check_sess(sess) crud.delete_image(self, sess)", "for measurement in self.result_measurements] db_image = DbImage( uid=self.uid, series_index=self.series_index, name=self.name, hint=self.hint, path_metadata=None, path_image=None,", "layer_id_list: List[int], suffix: str = None): ''' Method to estimate ground truth from", "[crud.read_result_layer_by_uid( layer_id).to_int_class().data for layer_id in layer_id_list] ground_truth_estimation_array = 
utils_results.staple_gte( label_array_list) hint = f\"Following", "result layer and saves it to db and file storage. get_classifiers(clf_type: str) ->", "(image_shape[0], image_shape[-2], image_shape[-1]): int_result_layer = IntImageResultLayer( uid=-1, name=f\"{path.name}\", hint=\"imported mask\", image_id=self.uid, layer_type=\"labels\", data=mask", "= IntImageResultLayer( uid=-1, name=f\"{path.name}\", hint=\"imported maks\", image_id=self.uid, layer_type=\"labels\", data=mask ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid)", "to be deleted. ''' layer = self.select_result_layer(layer_id) if layer_id == self.bg_layer_id: self.set_bg_false() layer.delete()", "{}: clf_dict[\"No classifers found\"] = None return clf_dict def refresh_from_db(self): ''' Requests current", "initializes it (save to db and file storage) Returns IntResultMeasurement object: measurement.measurement has", "which generates path and id. ''' if self.uid == -1: db_image = self.to_db_class()", "for use in napari viewer. Parameters: - clf_type(str): Valid classifier type, for available", ": str the objects name series_index : int if multiple images are imported", "imported has_bg_layer : bool False by default. Indicates if image as layer selected", "result layer of this image. Result layer will be turned to binary, assuming", "db_image.path_metadata) path_xml = utils_paths.make_metadata_xml_path_from_json_path( db_image.path_metadata) # metadata_string = self.metadata_omexml.to_xml(encoding=\"utf-8\") metadata_omexml = self.metadata_omexml.toprettyxml(indent=\"\\t\") fsr.save_metadata_xml(metadata_omexml,", "internal operations with images. Attributes ---------- uid : int the objects unique identifier", "be binarized, all labels > 0 will be unified and represented as foreground", "of image if multiple images were imported in a single file name :", "generated and updated in object. 
Parameters: - sess(sqlalchemy.orm.Session): The database session to be", "com class. ''' kwargs = self.dict() kwargs[\"metadata\"] = utils_import.load_metadata_only( self.path_metadata) kwargs[\"imageResultLayers\"] = [image_result_layer.to_com_class()", "set_bg_true(self, image_layer: IntImageResultLayer): ''' Method to set layer as background layer. Parameters: -", "db_image.path_metadata) # metadata_string = self.metadata_omexml.to_xml(encoding=\"utf-8\") metadata_omexml = self.metadata_omexml.toprettyxml(indent=\"\\t\") fsr.save_metadata_xml(metadata_omexml, path_xml) # save thumbnail", "and bg_layer_id to None. Attribute is changed in db, then object attributes are", "path_metadata: Optional[Path] path_image: Optional[Path] image_result_layers: List[DbImageResultLayer] = [] measurements: List[DbResultMeasurement] = [] tags:", "cause trouble. metadata_omexml : Any original metadata xml data as read by bioformats", "be selected as background layer. ''' layer_uid = image_layer.uid db_image = self.to_db_class() db_image.set_bg_true(layer_uid)", "layer_id: int): ''' Method to delete a result layer by uid. If result", "suffix: str = None): ''' Method to estimate ground truth from multiple layers", "to None. Attribute is changed in db, then object attributes are reloaded from", "\"\" experiment_ids: List[int] = [] image_result_layers: List[IntImageResultLayer] = [] result_measurements: List[IntResultMeasurement] = []", "check_sess(sess) crud.update_image_bg_true(self.uid, layer_uid, sess) def create_in_db(self, sess=None): ''' Creates object in db. Paths", "image. image_result_layers : List[IntImageResultLayer] empty list by default. List of all associated IntImageResultLayer", "used (app.api.dependencies.get_db) ''' sess = check_sess(sess) sql_image = crud.create_image(self, sess) self.uid = sql_image.id", "(z,c,y,x) in which the image is stored. 
Is loaded from .zarr files, most", "data=_mask ) int_result_layer.on_init() self.refresh_from_db() self.measure_mask_in_image(int_result_layer.uid) else: warnings.warn( f\"Mask shape {mask.shape} does not match", "copy.deepcopy(self.data[:, channel, ...])[ :, np.newaxis, ...] return channel_data def select_result_layer(self, uid: int): layers", "np.newaxis, ...] return channel_data def select_result_layer(self, uid: int): layers = [_ for _", "db. set_bg_true(layer_uid: int, sess = None) sets \"has_bg_layer\" property to True in db.", "path to the images metadata \".json\". Automatically generated as image is saved to", "fsr.save_metadata_xml(metadata_omexml, path_xml) # save thumbnail thumbnail = utils_import.generate_thumbnail(self.data) thumbnail_path = self.get_thumbnail_path() fsr.save_thumbnail(thumbnail, thumbnail_path)", "self.series_index kwargs[\"hasBgLayer\"] = self.has_bg_layer kwargs[\"bgLayerId\"] = self.bg_layer_id kwargs[\"tags\"] = list(self.tags) return ComImage(**kwargs) def", "associated DbResultMeasurement objects tags: Set[str] = [] set of string keywords to easily", "layer. ''' layer_uid = image_layer.uid db_image = self.to_db_class() db_image.set_bg_true(layer_uid) self.refresh_from_db() def select_channel(self, channel:", "False: data = fsr.load_zarr(kwargs[\"path_image\"]) kwargs[\"data\"] = data else: kwargs[\"data\"] = None metadata =", "as image is saved to database. path_image: pathlib.Path, optional path to the images", "from metadata, defaulting to None\") return dims def to_db_class(self): ''' Transforms internal class", "probabilities. For ground truth estimation layer will be binarized, all labels > 0", "result layer is selected as background layer, the attributes \"has_bg_layer\" and \"bg_layer_id\" are", "zeros if no background layer is defined. measure_mask_in_image(layer_id: int) -> app.api.classes_int.IntMeasurementResult: Returns measurement", ": str, optional empty string by default. 
brief description of the object experiment_ids:", "initialize the object. Handles image as new image if \"uid\" == -1 and", "shape (z,y,x) or None. to_db_class() -> app.api.classes_db.DbImage: Returns object as DbImage object. set_bg_false():", "background layer, otherwise id of the background layer. path_metadata: pathlib.Path, optional path to", "or None if no scaling information was provided in metadata. ''' x =", "app import crud from app import fileserver_requests as fsr from app.api import utils_import,", "fsr.load_json(self.path_metadata) metadata[\"custom_channel_names\"] = channel_names fsr.save_metadata(metadata, self.path_metadata) def delete_from_system(self, sess=None): ''' calls crud.delete_image and", "uses SimpleITKs STAPLE algorithm to estimate ground truth. Resulting layer will be initialized", "db_image.path_metadata) # save metadata xml path_xml = utils_paths.make_metadata_xml_path_from_json_path( db_image.path_metadata) metadata_string = self.metadata_omexml.to_xml(encoding=\"utf-8\") metadata_string", "crud.update_image_bg_false(self.uid, sess) def set_bg_true(self, layer_uid: int, sess=None): ''' Sets images bg_layer_id property to", "Loads all classifiers of given type from database. Returns dictionary of format {\"UID_NAME\":", "custom field type___ array of shape (z,c,y,x) in which the image is stored.", "self.measure_mask_in_image(int_result_layer.uid) def add_layer_from_roi(self, path: Path): mask = utils_import.read_roi(path, self.data.shape) int_result_layer = IntImageResultLayer( uid=-1,", "self.measurements] kwargs[\"seriesIndex\"] = self.series_index kwargs[\"hasBgLayer\"] = self.has_bg_layer kwargs[\"bgLayerId\"] = self.bg_layer_id kwargs[\"tags\"] = list(self.tags)", "channel: int): ''' Helper method expects channel index. Returns deep copy of channel", "and the image. If layer was background_layer, corresponding attributes are reset. 
estimate_ground_truth_layer(layer_id_list: List[int],", "- channel_names(List[str]): List of strings to be saved as channel names. ''' metadata", "from db and updates the objects attributes. delete_result_layer(layer_id: int): Deletes the layer from", "None if no associated background layer, otherwise id of the background layer. path_metadata:", "calls crud.delete_image and passed db_image object to delete all associated files and db", "from pathlib import Path class DbImage(BaseModel): ''' A class to handle database and", "use the image. image_result_layers : List[IntImageResultLayer] empty list by default. List of all", "\"has_bg_layer\" property to True in db. sets bg_layer_id to given value. create_in_db(sess =", "uid: int name: str series_index: int metadata: dict hint: Optional[str] = \"\" experiment_ids:", "db_image.set_bg_true(layer_uid) self.refresh_from_db() def select_channel(self, channel: int): ''' Helper method expects channel index. Returns", "= self.metadata['pixel_size_physical_x'] y = self.metadata['pixel_size_physical_y'] z = self.metadata['pixel_size_physical_z'] n_z = self.metadata['pixel_size_z'] if n_z", "bg_mask = np.zeros(( self.data.shape[0], self.data.shape[2], self.data.shape[3] )) if bg_mask.max() < 2: bg_mask =", "and zeros if no background layer is defined. measure_mask_in_image(layer_id: int) -> app.api.classes_int.IntMeasurementResult: Returns", "to delete a result layer by uid. If result layer is selected as", "delete a result layer by uid. If result layer is selected as background", "original metadata xml data as read by bioformats import when image was imported", "app.api.classes_int.IntMeasurementResult: Returns measurement object for given result layer and saves it to db", "objects attributes. delete_result_layer(layer_id: int): Deletes the layer from database, file storage and the", "UID}. Mainly for use in napari viewer. 
Parameters: - clf_type(str): Valid classifier type,", "typing import Any, List, Optional, Set import numpy as np from app import", "for result_layer in self.image_result_layers] db_result_measurements = [measurement.to_db_class() for measurement in self.result_measurements] db_image =", "int_result_layer = IntImageResultLayer( uid=-1, name=f\"{path.name}\", hint=\"imported maks\", image_id=self.uid, layer_type=\"labels\", data=mask ) int_result_layer.on_init() self.refresh_from_db()", "image_result_layers : List[IntImageResultLayer] empty list by default. List of all associated IntImageResultLayer objects.", "def on_init(self): ''' Method to initialize the object. Handles image as new image", "and joins with return value from utils_paths.make_thumbnail_path Returns path as pathlib.Path ''' return", "self.bg_layer_id kwargs[\"tags\"] = list(self.tags) return ComImage(**kwargs) def set_bg_false(self, sess=None): ''' Sets imagaes has_bg_layer", "= check_sess(sess) sql_image = crud.create_image(self, sess) self.uid = sql_image.id self.path_image = Path(sql_image.path_image) self.path_metadata", "associated background layer, otherwise id of the background layer. path_metadata: pathlib.Path, optional path", "from app import fileserver_requests as fsr from app.api import utils_import, utils_paths, utils_results from" ]
[ ".eg005_get_rooms_with_filters import eg005 from .eg006_create_external_form_fill_session import eg006 from .eg007_create_form_group import eg007 from .eg008_grant_office_access_to_form_group", "import eg002 from .eg003_export_data_from_room import eg003 from .eg004_add_forms_to_room import eg004 from .eg005_get_rooms_with_filters import", "from .eg005_get_rooms_with_filters import eg005 from .eg006_create_external_form_fill_session import eg006 from .eg007_create_form_group import eg007 from", ".eg002_create_room_with_template import eg002 from .eg003_export_data_from_room import eg003 from .eg004_add_forms_to_room import eg004 from .eg005_get_rooms_with_filters", ".eg001_create_room_with_data import eg001Rooms from .eg002_create_room_with_template import eg002 from .eg003_export_data_from_room import eg003 from .eg004_add_forms_to_room", "eg005 from .eg006_create_external_form_fill_session import eg006 from .eg007_create_form_group import eg007 from .eg008_grant_office_access_to_form_group import eg008", "eg003 from .eg004_add_forms_to_room import eg004 from .eg005_get_rooms_with_filters import eg005 from .eg006_create_external_form_fill_session import eg006", ".eg003_export_data_from_room import eg003 from .eg004_add_forms_to_room import eg004 from .eg005_get_rooms_with_filters import eg005 from .eg006_create_external_form_fill_session", "from .eg002_create_room_with_template import eg002 from .eg003_export_data_from_room import eg003 from .eg004_add_forms_to_room import eg004 from", "from .eg004_add_forms_to_room import eg004 from .eg005_get_rooms_with_filters import eg005 from .eg006_create_external_form_fill_session import eg006 from", "from .eg001_create_room_with_data import eg001Rooms from .eg002_create_room_with_template import eg002 from .eg003_export_data_from_room import eg003 from", "import eg004 from .eg005_get_rooms_with_filters import eg005 from .eg006_create_external_form_fill_session import eg006 from .eg007_create_form_group import", 
".eg006_create_external_form_fill_session import eg006 from .eg007_create_form_group import eg007 from .eg008_grant_office_access_to_form_group import eg008 from .eg009_assign_form_to_form_group", "<gh_stars>10-100 from .eg001_create_room_with_data import eg001Rooms from .eg002_create_room_with_template import eg002 from .eg003_export_data_from_room import eg003", "from .eg006_create_external_form_fill_session import eg006 from .eg007_create_form_group import eg007 from .eg008_grant_office_access_to_form_group import eg008 from", "import eg005 from .eg006_create_external_form_fill_session import eg006 from .eg007_create_form_group import eg007 from .eg008_grant_office_access_to_form_group import", "import eg006 from .eg007_create_form_group import eg007 from .eg008_grant_office_access_to_form_group import eg008 from .eg009_assign_form_to_form_group import", "eg001Rooms from .eg002_create_room_with_template import eg002 from .eg003_export_data_from_room import eg003 from .eg004_add_forms_to_room import eg004", "eg004 from .eg005_get_rooms_with_filters import eg005 from .eg006_create_external_form_fill_session import eg006 from .eg007_create_form_group import eg007", "eg002 from .eg003_export_data_from_room import eg003 from .eg004_add_forms_to_room import eg004 from .eg005_get_rooms_with_filters import eg005", "import eg003 from .eg004_add_forms_to_room import eg004 from .eg005_get_rooms_with_filters import eg005 from .eg006_create_external_form_fill_session import", "from .eg003_export_data_from_room import eg003 from .eg004_add_forms_to_room import eg004 from .eg005_get_rooms_with_filters import eg005 from", "eg006 from .eg007_create_form_group import eg007 from .eg008_grant_office_access_to_form_group import eg008 from .eg009_assign_form_to_form_group import eg009", ".eg004_add_forms_to_room import eg004 from .eg005_get_rooms_with_filters import eg005 from .eg006_create_external_form_fill_session import eg006 from .eg007_create_form_group", "import eg001Rooms 
from .eg002_create_room_with_template import eg002 from .eg003_export_data_from_room import eg003 from .eg004_add_forms_to_room import" ]
[ "Tuple[str, str]: if args.username is None: args.username = input('Username: ') if args.password is", "for %s: invalid option: --method %s', service.get_name(), args.method) sys.exit(1) with utils.with_cookiejar(utils.new_default_session(), path=args.cookie) as", "args.password log.warning('If you don\\'t want to give your password to this program, you", "not in ['github', 'twitter']: log.failure('login for yukicoder: invalid option: --method %s', args.method) sys.exit(1)", "if args.method: log.failure('login for %s: invalid option: --method %s', service.get_name(), args.method) sys.exit(1) with", "this program, you can give only your session tokens.') log.info('see: https://github.com/kmyk/online-judge-tools/blob/master/LOGIN_WITH_COOKIES.md') service.login(get_credentials, session=sess,", "login def get_credentials() -> Tuple[str, str]: if args.username is None: args.username = input('Username:", "service is None: sys.exit(1) # configure kwargs = {} if service.get_name() == 'yukicoder':", "option: --method %s', service.get_name(), args.method) sys.exit(1) with utils.with_cookiejar(utils.new_default_session(), path=args.cookie) as sess: if args.check:", "def get_credentials() -> Tuple[str, str]: if args.username is None: args.username = input('Username: ')", "# configure kwargs = {} if service.get_name() == 'yukicoder': if not args.method: args.method", "invalid option: --method %s', service.get_name(), args.method) sys.exit(1) with utils.with_cookiejar(utils.new_default_session(), path=args.cookie) as sess: if", "service.is_logged_in(session=sess): log.info('You have already signed in.') else: log.info('You are not signed in.') sys.exit(1)", "%s', args.method) sys.exit(1) kwargs['method'] = args.method else: if args.method: log.failure('login for %s: invalid", "else: # login def get_credentials() -> Tuple[str, str]: if args.username is None: args.username", "get service service = onlinejudge.dispatch.service_from_url(args.url) if service is None: sys.exit(1) # 
configure kwargs", "utils if TYPE_CHECKING: import argparse def login(args: 'argparse.Namespace') -> None: # get service", "def login(args: 'argparse.Namespace') -> None: # get service service = onlinejudge.dispatch.service_from_url(args.url) if service", "if args.check: if service.is_logged_in(session=sess): log.info('You have already signed in.') else: log.info('You are not", "3.x import getpass import sys from typing import * import onlinejudge import onlinejudge.implementation.logging", "args.method) sys.exit(1) kwargs['method'] = args.method else: if args.method: log.failure('login for %s: invalid option:", "not args.method: args.method = 'github' if args.method not in ['github', 'twitter']: log.failure('login for", "None: sys.exit(1) # configure kwargs = {} if service.get_name() == 'yukicoder': if not", "# get service service = onlinejudge.dispatch.service_from_url(args.url) if service is None: sys.exit(1) # configure", "'github' if args.method not in ['github', 'twitter']: log.failure('login for yukicoder: invalid option: --method", "args.method: args.method = 'github' if args.method not in ['github', 'twitter']: log.failure('login for yukicoder:", "as log import onlinejudge.implementation.utils as utils if TYPE_CHECKING: import argparse def login(args: 'argparse.Namespace')", "sys.exit(1) kwargs['method'] = args.method else: if args.method: log.failure('login for %s: invalid option: --method", "program, you can give only your session tokens.') log.info('see: https://github.com/kmyk/online-judge-tools/blob/master/LOGIN_WITH_COOKIES.md') service.login(get_credentials, session=sess, **kwargs)", "getpass.getpass() return args.username, args.password log.warning('If you don\\'t want to give your password to", "from typing import * import onlinejudge import onlinejudge.implementation.logging as log import onlinejudge.implementation.utils as", "path=args.cookie) as sess: if args.check: if service.is_logged_in(session=sess): log.info('You have already signed in.') 
else:", "configure kwargs = {} if service.get_name() == 'yukicoder': if not args.method: args.method =", "'twitter']: log.failure('login for yukicoder: invalid option: --method %s', args.method) sys.exit(1) kwargs['method'] = args.method", "log.info('You are not signed in.') sys.exit(1) else: # login def get_credentials() -> Tuple[str,", "'argparse.Namespace') -> None: # get service service = onlinejudge.dispatch.service_from_url(args.url) if service is None:", "login(args: 'argparse.Namespace') -> None: # get service service = onlinejudge.dispatch.service_from_url(args.url) if service is", "TYPE_CHECKING: import argparse def login(args: 'argparse.Namespace') -> None: # get service service =", "onlinejudge.implementation.utils as utils if TYPE_CHECKING: import argparse def login(args: 'argparse.Namespace') -> None: #", "in.') sys.exit(1) else: # login def get_credentials() -> Tuple[str, str]: if args.username is", "Version: 3.x import getpass import sys from typing import * import onlinejudge import", "args.username, args.password log.warning('If you don\\'t want to give your password to this program,", "kwargs = {} if service.get_name() == 'yukicoder': if not args.method: args.method = 'github'", "* import onlinejudge import onlinejudge.implementation.logging as log import onlinejudge.implementation.utils as utils if TYPE_CHECKING:", "log import onlinejudge.implementation.utils as utils if TYPE_CHECKING: import argparse def login(args: 'argparse.Namespace') ->", "invalid option: --method %s', args.method) sys.exit(1) kwargs['method'] = args.method else: if args.method: log.failure('login", "with utils.with_cookiejar(utils.new_default_session(), path=args.cookie) as sess: if args.check: if service.is_logged_in(session=sess): log.info('You have already signed", "') if args.password is None: args.password = getpass.getpass() return args.username, args.password log.warning('If you", "import onlinejudge.implementation.utils as utils if TYPE_CHECKING: import argparse 
def login(args: 'argparse.Namespace') -> None:", "= onlinejudge.dispatch.service_from_url(args.url) if service is None: sys.exit(1) # configure kwargs = {} if", "if args.username is None: args.username = input('Username: ') if args.password is None: args.password", "you don\\'t want to give your password to this program, you can give", "'yukicoder': if not args.method: args.method = 'github' if args.method not in ['github', 'twitter']:", "sys.exit(1) else: # login def get_credentials() -> Tuple[str, str]: if args.username is None:", "Python Version: 3.x import getpass import sys from typing import * import onlinejudge", "import argparse def login(args: 'argparse.Namespace') -> None: # get service service = onlinejudge.dispatch.service_from_url(args.url)", "give your password to this program, you can give only your session tokens.')", "-> Tuple[str, str]: if args.username is None: args.username = input('Username: ') if args.password", "else: if args.method: log.failure('login for %s: invalid option: --method %s', service.get_name(), args.method) sys.exit(1)", "for yukicoder: invalid option: --method %s', args.method) sys.exit(1) kwargs['method'] = args.method else: if", "input('Username: ') if args.password is None: args.password = getpass.getpass() return args.username, args.password log.warning('If", "service = onlinejudge.dispatch.service_from_url(args.url) if service is None: sys.exit(1) # configure kwargs = {}", "already signed in.') else: log.info('You are not signed in.') sys.exit(1) else: # login", "not signed in.') sys.exit(1) else: # login def get_credentials() -> Tuple[str, str]: if", "service.get_name() == 'yukicoder': if not args.method: args.method = 'github' if args.method not in", "service service = onlinejudge.dispatch.service_from_url(args.url) if service is None: sys.exit(1) # configure kwargs =", "log.info('You have already signed in.') else: log.info('You are not signed in.') sys.exit(1) else:", "# Python Version: 3.x import getpass import sys 
from typing import * import", "get_credentials() -> Tuple[str, str]: if args.username is None: args.username = input('Username: ') if", "argparse def login(args: 'argparse.Namespace') -> None: # get service service = onlinejudge.dispatch.service_from_url(args.url) if", "don\\'t want to give your password to this program, you can give only", "sys.exit(1) # configure kwargs = {} if service.get_name() == 'yukicoder': if not args.method:", "want to give your password to this program, you can give only your", "--method %s', service.get_name(), args.method) sys.exit(1) with utils.with_cookiejar(utils.new_default_session(), path=args.cookie) as sess: if args.check: if", "if service is None: sys.exit(1) # configure kwargs = {} if service.get_name() ==", "args.method: log.failure('login for %s: invalid option: --method %s', service.get_name(), args.method) sys.exit(1) with utils.with_cookiejar(utils.new_default_session(),", "= input('Username: ') if args.password is None: args.password = getpass.getpass() return args.username, args.password", "args.password is None: args.password = getpass.getpass() return args.username, args.password log.warning('If you don\\'t want", "args.check: if service.is_logged_in(session=sess): log.info('You have already signed in.') else: log.info('You are not signed", "is None: sys.exit(1) # configure kwargs = {} if service.get_name() == 'yukicoder': if", "as sess: if args.check: if service.is_logged_in(session=sess): log.info('You have already signed in.') else: log.info('You", "yukicoder: invalid option: --method %s', args.method) sys.exit(1) kwargs['method'] = args.method else: if args.method:", "kwargs['method'] = args.method else: if args.method: log.failure('login for %s: invalid option: --method %s',", "# login def get_credentials() -> Tuple[str, str]: if args.username is None: args.username =", "if not args.method: args.method = 'github' if args.method not in ['github', 'twitter']: log.failure('login", "service.get_name(), args.method) 
sys.exit(1) with utils.with_cookiejar(utils.new_default_session(), path=args.cookie) as sess: if args.check: if service.is_logged_in(session=sess): log.info('You", "in ['github', 'twitter']: log.failure('login for yukicoder: invalid option: --method %s', args.method) sys.exit(1) kwargs['method']", "if args.password is None: args.password = getpass.getpass() return args.username, args.password log.warning('If you don\\'t", "{} if service.get_name() == 'yukicoder': if not args.method: args.method = 'github' if args.method", "%s', service.get_name(), args.method) sys.exit(1) with utils.with_cookiejar(utils.new_default_session(), path=args.cookie) as sess: if args.check: if service.is_logged_in(session=sess):", "is None: args.username = input('Username: ') if args.password is None: args.password = getpass.getpass()", "onlinejudge.implementation.logging as log import onlinejudge.implementation.utils as utils if TYPE_CHECKING: import argparse def login(args:", "None: args.username = input('Username: ') if args.password is None: args.password = getpass.getpass() return", "--method %s', args.method) sys.exit(1) kwargs['method'] = args.method else: if args.method: log.failure('login for %s:", "sys.exit(1) with utils.with_cookiejar(utils.new_default_session(), path=args.cookie) as sess: if args.check: if service.is_logged_in(session=sess): log.info('You have already", "import * import onlinejudge import onlinejudge.implementation.logging as log import onlinejudge.implementation.utils as utils if", "-> None: # get service service = onlinejudge.dispatch.service_from_url(args.url) if service is None: sys.exit(1)", "onlinejudge import onlinejudge.implementation.logging as log import onlinejudge.implementation.utils as utils if TYPE_CHECKING: import argparse", "args.method not in ['github', 'twitter']: log.failure('login for yukicoder: invalid option: --method %s', args.method)", "log.failure('login for yukicoder: invalid option: --method %s', args.method) sys.exit(1) 
kwargs['method'] = args.method else:", "in.') else: log.info('You are not signed in.') sys.exit(1) else: # login def get_credentials()", "log.failure('login for %s: invalid option: --method %s', service.get_name(), args.method) sys.exit(1) with utils.with_cookiejar(utils.new_default_session(), path=args.cookie)", "import sys from typing import * import onlinejudge import onlinejudge.implementation.logging as log import", "as utils if TYPE_CHECKING: import argparse def login(args: 'argparse.Namespace') -> None: # get", "option: --method %s', args.method) sys.exit(1) kwargs['method'] = args.method else: if args.method: log.failure('login for", "args.method else: if args.method: log.failure('login for %s: invalid option: --method %s', service.get_name(), args.method)", "%s: invalid option: --method %s', service.get_name(), args.method) sys.exit(1) with utils.with_cookiejar(utils.new_default_session(), path=args.cookie) as sess:", "else: log.info('You are not signed in.') sys.exit(1) else: # login def get_credentials() ->", "you can give only your session tokens.') log.info('see: https://github.com/kmyk/online-judge-tools/blob/master/LOGIN_WITH_COOKIES.md') service.login(get_credentials, session=sess, **kwargs) #", "args.username = input('Username: ') if args.password is None: args.password = getpass.getpass() return args.username,", "args.method = 'github' if args.method not in ['github', 'twitter']: log.failure('login for yukicoder: invalid", "args.username is None: args.username = input('Username: ') if args.password is None: args.password =", "import onlinejudge.implementation.logging as log import onlinejudge.implementation.utils as utils if TYPE_CHECKING: import argparse def", "import onlinejudge import onlinejudge.implementation.logging as log import onlinejudge.implementation.utils as utils if TYPE_CHECKING: import", "can give only your session tokens.') log.info('see: https://github.com/kmyk/online-judge-tools/blob/master/LOGIN_WITH_COOKIES.md') 
service.login(get_credentials, session=sess, **kwargs) # type:", "if args.method not in ['github', 'twitter']: log.failure('login for yukicoder: invalid option: --method %s',", "sess: if args.check: if service.is_logged_in(session=sess): log.info('You have already signed in.') else: log.info('You are", "= args.method else: if args.method: log.failure('login for %s: invalid option: --method %s', service.get_name(),", "utils.with_cookiejar(utils.new_default_session(), path=args.cookie) as sess: if args.check: if service.is_logged_in(session=sess): log.info('You have already signed in.')", "= 'github' if args.method not in ['github', 'twitter']: log.failure('login for yukicoder: invalid option:", "getpass import sys from typing import * import onlinejudge import onlinejudge.implementation.logging as log", "to give your password to this program, you can give only your session", "= {} if service.get_name() == 'yukicoder': if not args.method: args.method = 'github' if", "log.warning('If you don\\'t want to give your password to this program, you can", "None: args.password = getpass.getpass() return args.username, args.password log.warning('If you don\\'t want to give", "are not signed in.') sys.exit(1) else: # login def get_credentials() -> Tuple[str, str]:", "have already signed in.') else: log.info('You are not signed in.') sys.exit(1) else: #", "is None: args.password = getpass.getpass() return args.username, args.password log.warning('If you don\\'t want to", "if TYPE_CHECKING: import argparse def login(args: 'argparse.Namespace') -> None: # get service service", "onlinejudge.dispatch.service_from_url(args.url) if service is None: sys.exit(1) # configure kwargs = {} if service.get_name()", "if service.get_name() == 'yukicoder': if not args.method: args.method = 'github' if args.method not", "signed in.') sys.exit(1) else: # login def get_credentials() -> Tuple[str, str]: if args.username", "args.password = getpass.getpass() return args.username, args.password 
log.warning('If you don\\'t want to give your", "str]: if args.username is None: args.username = input('Username: ') if args.password is None:", "password to this program, you can give only your session tokens.') log.info('see: https://github.com/kmyk/online-judge-tools/blob/master/LOGIN_WITH_COOKIES.md')", "to this program, you can give only your session tokens.') log.info('see: https://github.com/kmyk/online-judge-tools/blob/master/LOGIN_WITH_COOKIES.md') service.login(get_credentials,", "sys from typing import * import onlinejudge import onlinejudge.implementation.logging as log import onlinejudge.implementation.utils", "None: # get service service = onlinejudge.dispatch.service_from_url(args.url) if service is None: sys.exit(1) #", "import getpass import sys from typing import * import onlinejudge import onlinejudge.implementation.logging as", "typing import * import onlinejudge import onlinejudge.implementation.logging as log import onlinejudge.implementation.utils as utils", "<gh_stars>0 # Python Version: 3.x import getpass import sys from typing import *", "['github', 'twitter']: log.failure('login for yukicoder: invalid option: --method %s', args.method) sys.exit(1) kwargs['method'] =", "give only your session tokens.') log.info('see: https://github.com/kmyk/online-judge-tools/blob/master/LOGIN_WITH_COOKIES.md') service.login(get_credentials, session=sess, **kwargs) # type: ignore", "your password to this program, you can give only your session tokens.') log.info('see:", "== 'yukicoder': if not args.method: args.method = 'github' if args.method not in ['github',", "= getpass.getpass() return args.username, args.password log.warning('If you don\\'t want to give your password", "args.method) sys.exit(1) with utils.with_cookiejar(utils.new_default_session(), path=args.cookie) as sess: if args.check: if service.is_logged_in(session=sess): log.info('You have", "return args.username, args.password log.warning('If you don\\'t want to give your password to this", 
"signed in.') else: log.info('You are not signed in.') sys.exit(1) else: # login def", "if service.is_logged_in(session=sess): log.info('You have already signed in.') else: log.info('You are not signed in.')" ]
[ ": 'models/model5.json', 'clivage_5' : 'models/model5.h5', 'clivage_6_json' : 'models/model6.json', 'clivage_6' : 'models/model6.h5', 'clivage_7_json' :", "= 0 for i in range(0,int(len(keys)/2)): try: i = i*2 json_file = open(model_names.get(keys[i]),", "model_names = {'clivage_1_json' : 'models/model1.json', 'clivage_1' : 'models/model1.h5', 'clivage_2_json' : 'models/model2.json', 'clivage_2' :", ": 'models/model1.json', 'clivage_1' : 'models/model1.h5', 'clivage_2_json' : 'models/model2.json', 'clivage_2' : 'models/model2.h5', 'clivage_3_json' :", "= {'clivage_1_json' : 'models/model1.json', 'clivage_1' : 'models/model1.h5', 'clivage_2_json' : 'models/model2.json', 'clivage_2' : 'models/model2.h5',", "'models/model1.h5', 'clivage_2_json' : 'models/model2.json', 'clivage_2' : 'models/model2.h5', 'clivage_3_json' : 'models/model3.json', 'clivage_3' : 'models/model3.h5',", "= model_from_json(loaded_model_json) loaded_model.load_weights(model_names.get(keys[i+1])) print(\"Loaded model from disk\") models.append(loaded_model) except Exception as e: raise", "loaded_model.load_weights(model_names.get(keys[i+1])) print(\"Loaded model from disk\") models.append(loaded_model) except Exception as e: raise else: pass", "'clivage_5_json' : 'models/model5.json', 'clivage_5' : 'models/model5.h5', 'clivage_6_json' : 'models/model6.json', 'clivage_6' : 'models/model6.h5', 'clivage_7_json'", "'models/model4.h5', 'clivage_5_json' : 'models/model5.json', 'clivage_5' : 'models/model5.h5', 'clivage_6_json' : 'models/model6.json', 'clivage_6' : 'models/model6.h5',", ": 'models/model8.json', 'clivage_8' : 'models/model8.h5', 'clivage_9_json' : 'models/model9.json', 'clivage_9' : 'models/model9.h5'} def automatic_loading", "[] j = 0 for i in range(0,int(len(keys)/2)): try: i = i*2 json_file", "import model_from_json from keras.models import load_model model_names = {'clivage_1_json' : 'models/model1.json', 'clivage_1' :", "= list(model_names.keys()) #print(keys) models = [] j = 0 for 
i in range(0,int(len(keys)/2)):", "<gh_stars>0 from keras.models import model_from_json from keras.models import load_model model_names = {'clivage_1_json' :", "'clivage_8_json' : 'models/model8.json', 'clivage_8' : 'models/model8.h5', 'clivage_9_json' : 'models/model9.json', 'clivage_9' : 'models/model9.h5'} def", "load_model model_names = {'clivage_1_json' : 'models/model1.json', 'clivage_1' : 'models/model1.h5', 'clivage_2_json' : 'models/model2.json', 'clivage_2'", "'clivage_8' : 'models/model8.h5', 'clivage_9_json' : 'models/model9.json', 'clivage_9' : 'models/model9.h5'} def automatic_loading () :", ": 'models/model1.h5', 'clivage_2_json' : 'models/model2.json', 'clivage_2' : 'models/model2.h5', 'clivage_3_json' : 'models/model3.json', 'clivage_3' :", "'clivage_7_json' : 'models/model7.json', 'clivage_7' : 'models/model7.h5', 'clivage_8_json' : 'models/model8.json', 'clivage_8' : 'models/model8.h5', 'clivage_9_json'", "'models/model9.h5'} def automatic_loading () : keys = list(model_names.keys()) #print(keys) models = [] j", "automatic_loading () : keys = list(model_names.keys()) #print(keys) models = [] j = 0", "json_file = open(model_names.get(keys[i]), 'r') loaded_model_json = json_file.read() json_file.close() loaded_model = model_from_json(loaded_model_json) loaded_model.load_weights(model_names.get(keys[i+1])) print(\"Loaded", "'models/model9.json', 'clivage_9' : 'models/model9.h5'} def automatic_loading () : keys = list(model_names.keys()) #print(keys) models", "in range(0,int(len(keys)/2)): try: i = i*2 json_file = open(model_names.get(keys[i]), 'r') loaded_model_json = json_file.read()", ": 'models/model4.json', 'clivage_4' : 'models/model4.h5', 'clivage_5_json' : 'models/model5.json', 'clivage_5' : 'models/model5.h5', 'clivage_6_json' :", ": 'models/model8.h5', 'clivage_9_json' : 'models/model9.json', 'clivage_9' : 'models/model9.h5'} def automatic_loading () : keys", "'clivage_4' : 'models/model4.h5', 'clivage_5_json' : 'models/model5.json', 
'clivage_5' : 'models/model5.h5', 'clivage_6_json' : 'models/model6.json', 'clivage_6'", "() : keys = list(model_names.keys()) #print(keys) models = [] j = 0 for", "'models/model5.h5', 'clivage_6_json' : 'models/model6.json', 'clivage_6' : 'models/model6.h5', 'clivage_7_json' : 'models/model7.json', 'clivage_7' : 'models/model7.h5',", "'r') loaded_model_json = json_file.read() json_file.close() loaded_model = model_from_json(loaded_model_json) loaded_model.load_weights(model_names.get(keys[i+1])) print(\"Loaded model from disk\")", ": 'models/model6.h5', 'clivage_7_json' : 'models/model7.json', 'clivage_7' : 'models/model7.h5', 'clivage_8_json' : 'models/model8.json', 'clivage_8' :", "for i in range(0,int(len(keys)/2)): try: i = i*2 json_file = open(model_names.get(keys[i]), 'r') loaded_model_json", "0 for i in range(0,int(len(keys)/2)): try: i = i*2 json_file = open(model_names.get(keys[i]), 'r')", "'clivage_9_json' : 'models/model9.json', 'clivage_9' : 'models/model9.h5'} def automatic_loading () : keys = list(model_names.keys())", "'clivage_6' : 'models/model6.h5', 'clivage_7_json' : 'models/model7.json', 'clivage_7' : 'models/model7.h5', 'clivage_8_json' : 'models/model8.json', 'clivage_8'", "try: i = i*2 json_file = open(model_names.get(keys[i]), 'r') loaded_model_json = json_file.read() json_file.close() loaded_model", "keys = list(model_names.keys()) #print(keys) models = [] j = 0 for i in", "range(0,int(len(keys)/2)): try: i = i*2 json_file = open(model_names.get(keys[i]), 'r') loaded_model_json = json_file.read() json_file.close()", "'models/model5.json', 'clivage_5' : 'models/model5.h5', 'clivage_6_json' : 'models/model6.json', 'clivage_6' : 'models/model6.h5', 'clivage_7_json' : 'models/model7.json',", "'clivage_3_json' : 'models/model3.json', 'clivage_3' : 'models/model3.h5', 'clivage_4_json' : 'models/model4.json', 'clivage_4' : 'models/model4.h5', 'clivage_5_json'", "model_from_json(loaded_model_json) 
loaded_model.load_weights(model_names.get(keys[i+1])) print(\"Loaded model from disk\") models.append(loaded_model) except Exception as e: raise else:", ": 'models/model4.h5', 'clivage_5_json' : 'models/model5.json', 'clivage_5' : 'models/model5.h5', 'clivage_6_json' : 'models/model6.json', 'clivage_6' :", "i = i*2 json_file = open(model_names.get(keys[i]), 'r') loaded_model_json = json_file.read() json_file.close() loaded_model =", ": 'models/model9.json', 'clivage_9' : 'models/model9.h5'} def automatic_loading () : keys = list(model_names.keys()) #print(keys)", "#print(keys) models = [] j = 0 for i in range(0,int(len(keys)/2)): try: i", "'models/model8.h5', 'clivage_9_json' : 'models/model9.json', 'clivage_9' : 'models/model9.h5'} def automatic_loading () : keys =", "model_from_json from keras.models import load_model model_names = {'clivage_1_json' : 'models/model1.json', 'clivage_1' : 'models/model1.h5',", ": 'models/model6.json', 'clivage_6' : 'models/model6.h5', 'clivage_7_json' : 'models/model7.json', 'clivage_7' : 'models/model7.h5', 'clivage_8_json' :", "'models/model3.h5', 'clivage_4_json' : 'models/model4.json', 'clivage_4' : 'models/model4.h5', 'clivage_5_json' : 'models/model5.json', 'clivage_5' : 'models/model5.h5',", "from keras.models import model_from_json from keras.models import load_model model_names = {'clivage_1_json' : 'models/model1.json',", "j = 0 for i in range(0,int(len(keys)/2)): try: i = i*2 json_file =", "'models/model2.json', 'clivage_2' : 'models/model2.h5', 'clivage_3_json' : 'models/model3.json', 'clivage_3' : 'models/model3.h5', 'clivage_4_json' : 'models/model4.json',", "'clivage_1' : 'models/model1.h5', 'clivage_2_json' : 'models/model2.json', 'clivage_2' : 'models/model2.h5', 'clivage_3_json' : 'models/model3.json', 'clivage_3'", ": 'models/model7.h5', 'clivage_8_json' : 'models/model8.json', 'clivage_8' : 'models/model8.h5', 'clivage_9_json' : 'models/model9.json', 'clivage_9' :", "'models/model6.json', 'clivage_6' : 
'models/model6.h5', 'clivage_7_json' : 'models/model7.json', 'clivage_7' : 'models/model7.h5', 'clivage_8_json' : 'models/model8.json',", "'clivage_2' : 'models/model2.h5', 'clivage_3_json' : 'models/model3.json', 'clivage_3' : 'models/model3.h5', 'clivage_4_json' : 'models/model4.json', 'clivage_4'", "import load_model model_names = {'clivage_1_json' : 'models/model1.json', 'clivage_1' : 'models/model1.h5', 'clivage_2_json' : 'models/model2.json',", "loaded_model_json = json_file.read() json_file.close() loaded_model = model_from_json(loaded_model_json) loaded_model.load_weights(model_names.get(keys[i+1])) print(\"Loaded model from disk\") models.append(loaded_model)", "def automatic_loading () : keys = list(model_names.keys()) #print(keys) models = [] j =", ": 'models/model9.h5'} def automatic_loading () : keys = list(model_names.keys()) #print(keys) models = []", "loaded_model = model_from_json(loaded_model_json) loaded_model.load_weights(model_names.get(keys[i+1])) print(\"Loaded model from disk\") models.append(loaded_model) except Exception as e:", "= json_file.read() json_file.close() loaded_model = model_from_json(loaded_model_json) loaded_model.load_weights(model_names.get(keys[i+1])) print(\"Loaded model from disk\") models.append(loaded_model) except", ": 'models/model3.json', 'clivage_3' : 'models/model3.h5', 'clivage_4_json' : 'models/model4.json', 'clivage_4' : 'models/model4.h5', 'clivage_5_json' :", "from keras.models import load_model model_names = {'clivage_1_json' : 'models/model1.json', 'clivage_1' : 'models/model1.h5', 'clivage_2_json'", "i in range(0,int(len(keys)/2)): try: i = i*2 json_file = open(model_names.get(keys[i]), 'r') loaded_model_json =", "disk\") models.append(loaded_model) except Exception as e: raise else: pass finally: pass return models", "open(model_names.get(keys[i]), 'r') loaded_model_json = json_file.read() json_file.close() loaded_model = model_from_json(loaded_model_json) 
loaded_model.load_weights(model_names.get(keys[i+1])) print(\"Loaded model from", "'models/model4.json', 'clivage_4' : 'models/model4.h5', 'clivage_5_json' : 'models/model5.json', 'clivage_5' : 'models/model5.h5', 'clivage_6_json' : 'models/model6.json',", "'models/model2.h5', 'clivage_3_json' : 'models/model3.json', 'clivage_3' : 'models/model3.h5', 'clivage_4_json' : 'models/model4.json', 'clivage_4' : 'models/model4.h5',", "models = [] j = 0 for i in range(0,int(len(keys)/2)): try: i =", "'models/model1.json', 'clivage_1' : 'models/model1.h5', 'clivage_2_json' : 'models/model2.json', 'clivage_2' : 'models/model2.h5', 'clivage_3_json' : 'models/model3.json',", "'clivage_9' : 'models/model9.h5'} def automatic_loading () : keys = list(model_names.keys()) #print(keys) models =", "model from disk\") models.append(loaded_model) except Exception as e: raise else: pass finally: pass", "from disk\") models.append(loaded_model) except Exception as e: raise else: pass finally: pass return", ": 'models/model3.h5', 'clivage_4_json' : 'models/model4.json', 'clivage_4' : 'models/model4.h5', 'clivage_5_json' : 'models/model5.json', 'clivage_5' :", "models.append(loaded_model) except Exception as e: raise else: pass finally: pass return models #print(automatic_loading())", "i*2 json_file = open(model_names.get(keys[i]), 'r') loaded_model_json = json_file.read() json_file.close() loaded_model = model_from_json(loaded_model_json) loaded_model.load_weights(model_names.get(keys[i+1]))", "list(model_names.keys()) #print(keys) models = [] j = 0 for i in range(0,int(len(keys)/2)): try:", "'models/model3.json', 'clivage_3' : 'models/model3.h5', 'clivage_4_json' : 'models/model4.json', 'clivage_4' : 'models/model4.h5', 'clivage_5_json' : 'models/model5.json',", "'clivage_3' : 'models/model3.h5', 'clivage_4_json' : 'models/model4.json', 'clivage_4' : 'models/model4.h5', 'clivage_5_json' : 'models/model5.json', 'clivage_5'", "keras.models import load_model model_names = {'clivage_1_json' : 
'models/model1.json', 'clivage_1' : 'models/model1.h5', 'clivage_2_json' :", "'models/model6.h5', 'clivage_7_json' : 'models/model7.json', 'clivage_7' : 'models/model7.h5', 'clivage_8_json' : 'models/model8.json', 'clivage_8' : 'models/model8.h5',", "'clivage_7' : 'models/model7.h5', 'clivage_8_json' : 'models/model8.json', 'clivage_8' : 'models/model8.h5', 'clivage_9_json' : 'models/model9.json', 'clivage_9'", "'clivage_6_json' : 'models/model6.json', 'clivage_6' : 'models/model6.h5', 'clivage_7_json' : 'models/model7.json', 'clivage_7' : 'models/model7.h5', 'clivage_8_json'", "{'clivage_1_json' : 'models/model1.json', 'clivage_1' : 'models/model1.h5', 'clivage_2_json' : 'models/model2.json', 'clivage_2' : 'models/model2.h5', 'clivage_3_json'", "= [] j = 0 for i in range(0,int(len(keys)/2)): try: i = i*2", ": 'models/model2.h5', 'clivage_3_json' : 'models/model3.json', 'clivage_3' : 'models/model3.h5', 'clivage_4_json' : 'models/model4.json', 'clivage_4' :", ": 'models/model5.h5', 'clivage_6_json' : 'models/model6.json', 'clivage_6' : 'models/model6.h5', 'clivage_7_json' : 'models/model7.json', 'clivage_7' :", "'models/model7.h5', 'clivage_8_json' : 'models/model8.json', 'clivage_8' : 'models/model8.h5', 'clivage_9_json' : 'models/model9.json', 'clivage_9' : 'models/model9.h5'}", ": keys = list(model_names.keys()) #print(keys) models = [] j = 0 for i", "json_file.read() json_file.close() loaded_model = model_from_json(loaded_model_json) loaded_model.load_weights(model_names.get(keys[i+1])) print(\"Loaded model from disk\") models.append(loaded_model) except Exception", "'clivage_2_json' : 'models/model2.json', 'clivage_2' : 'models/model2.h5', 'clivage_3_json' : 'models/model3.json', 'clivage_3' : 'models/model3.h5', 'clivage_4_json'", "keras.models import model_from_json from keras.models import load_model model_names = {'clivage_1_json' : 'models/model1.json', 'clivage_1'", "'clivage_5' : 'models/model5.h5', 'clivage_6_json' : 'models/model6.json', 'clivage_6' : 
'models/model6.h5', 'clivage_7_json' : 'models/model7.json', 'clivage_7'", "= open(model_names.get(keys[i]), 'r') loaded_model_json = json_file.read() json_file.close() loaded_model = model_from_json(loaded_model_json) loaded_model.load_weights(model_names.get(keys[i+1])) print(\"Loaded model", "= i*2 json_file = open(model_names.get(keys[i]), 'r') loaded_model_json = json_file.read() json_file.close() loaded_model = model_from_json(loaded_model_json)", "'clivage_4_json' : 'models/model4.json', 'clivage_4' : 'models/model4.h5', 'clivage_5_json' : 'models/model5.json', 'clivage_5' : 'models/model5.h5', 'clivage_6_json'", "'models/model7.json', 'clivage_7' : 'models/model7.h5', 'clivage_8_json' : 'models/model8.json', 'clivage_8' : 'models/model8.h5', 'clivage_9_json' : 'models/model9.json',", "'models/model8.json', 'clivage_8' : 'models/model8.h5', 'clivage_9_json' : 'models/model9.json', 'clivage_9' : 'models/model9.h5'} def automatic_loading ()", ": 'models/model7.json', 'clivage_7' : 'models/model7.h5', 'clivage_8_json' : 'models/model8.json', 'clivage_8' : 'models/model8.h5', 'clivage_9_json' :", "print(\"Loaded model from disk\") models.append(loaded_model) except Exception as e: raise else: pass finally:", ": 'models/model2.json', 'clivage_2' : 'models/model2.h5', 'clivage_3_json' : 'models/model3.json', 'clivage_3' : 'models/model3.h5', 'clivage_4_json' :", "json_file.close() loaded_model = model_from_json(loaded_model_json) loaded_model.load_weights(model_names.get(keys[i+1])) print(\"Loaded model from disk\") models.append(loaded_model) except Exception as" ]
[]
[ "try: reply = self.resumeOperations() if reply: print(\"Operations will be executed\") self.mainBuilder = MainBuilder(", "True else: self.doBuildTdk = False if self.chkBuildTdkDeviceDriver.isChecked(): self.doBuildTdkDeviceDriver = True else: self.doBuildTdkDeviceDriver =", "if self.chkBuildTdkDesign.isChecked(): self.doBuildTdkDesign = True else: self.doBuildTdkDesign = False if self.chkBuildOverlord.isChecked(): if not", "self.checkDirectoriesExistence(FTCMfolderToBeChecked, TDKfolderToBeChecked): raise MyException('Error while creating folders for FTCM and TDK versions!') self.versionNumber", "reply: print(\"Operations will be executed\") self.mainBuilder = MainBuilder( self.isOfficialVersion, self.doBuildTdkDesign, self.doBuildOverlord, self.doBuildConfiguratore, self.doBuildTdk,", "+']\\n\\n' else: operationsLog += 'Building a NON_OFFICIAL VERSION of OVERLORD, version number is", "\"List of Operations\", operationsLog) reply = QMessageBox.question(self, \"List of Operations\", operationsLog, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)", "if self.doBuildTdkDeviceDriver: operationsLog += \"Building TDK DEVICE DRIVER: YES\\n\" else: operationsLog += \"Building", "def launch_tdkdevicedriver(self): self.mainBuilder.buildTdkDeviceDriver() pass def launch_tdk_setup(self): self.mainBuilder.buildTdkSetup() pass def launch_tdkdevicedriver_setup(self): pass def main_workflow(self):", "+ self.edtVersion.text() TDKfolderToBeChecked = self.configurator.NonOfficialTdkPath + \"\\\\\" + self.edtVersion.text() if not self.checkDirectoriesExistence(FTCMfolderToBeChecked, TDKfolderToBeChecked):", "else: self.doBuildTdkDesign = False if self.chkBuildOverlord.isChecked(): if not self.doBuildTdkDesign: self.chkBuildTdkDesign.toggle() self.doBuildTdkDesign = True", "+= \"Would you like to continue using these parameters?\" print(operationsLog) #QMessageBox.about(self, \"List of", "print('Directory %s has been created.'% (TDKfolderToBeChecked)) return True except MyException as e: 
QMessageBox.", "def launch_tdk_full(self): self.mainBuilder.buildTdkFinal() pass def launch_tdkdevicedriver(self): self.mainBuilder.buildTdkDeviceDriver() pass def launch_tdk_setup(self): self.mainBuilder.buildTdkSetup() pass def", "self.launch_tdkdevicedriver_setup() QMessageBox.about(self, \"List of Operations\", \"Operations have been finished!\") pass app = QtGui.QApplication(sys.argv)", "number is ['+ self.versionNumber +']\\n\\n' else: operationsLog += 'Building a NON_OFFICIAL VERSION of", "specified!') if self.isOfficialVersion: FTCMfolderToBeChecked = self.configurator.OfficialExePath + \"\\\\\" + self.edtVersion.text() TDKfolderToBeChecked = self.configurator.OfficialTdkPath", "<reponame>neobepmat/BatchBuilder # Temperature-conversion program using PyQt import sys, os from PyQt4 import QtGui,", "FTCMfolderToBeChecked = self.configurator.OfficialExePath + \"\\\\\" + self.edtVersion.text() TDKfolderToBeChecked = self.configurator.OfficialTdkPath + \"\\\\\" +", "+= \"Building TDK: NO\\n\" if self.doBuildTdk_Debug: operationsLog += \"Building TDK in DEBUG: YES\\n\\n\"", "self.resumeOperations() if reply: print(\"Operations will be executed\") self.mainBuilder = MainBuilder( self.isOfficialVersion, self.doBuildTdkDesign, self.doBuildOverlord,", "= QMessageBox.question(self, \"List of Operations\", operationsLog, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) if reply == QtGui.QMessageBox.Yes: return", "about(self, \"CheckDirectoryExistence - Exception\", str(e)) return False def resumeOperations(self): if not self.edtVersion.text(): raise", "DEVICE DRIVER: NO\\n\" if self.doBuildTdkDeviceDriver_Debug: operationsLog += \"Building TDK DEVICE DRIVER in DEBUG:", "versions!') self.versionNumber = self.edtVersion.text() operationsLog = \"\" self.getOfficialVersionType() self.getOperationsList() if self.isOfficialVersion: operationsLog +=", "OVERLORD: YES\\n\" else: operationsLog += \"Building OVERLORD: NO\\n\" if self.doBuildConfiguratore: operationsLog += \"Building", 
"= False self.configurator = BuilderConfiguration() def getOfficialVersionType(self): if self.chkOfficialVersion.isChecked(): self.isOfficialVersion = True else:", "False def getOperationsList(self): if self.chkBuildTdkDesign.isChecked(): self.doBuildTdkDesign = True else: self.doBuildTdkDesign = False if", "in DEBUG: YES\\n\\n\" else: operationsLog += \"Building TDK DEVICE DRIVER in DEBUG: NO\\n\\n\"", "TDK: YES\\n\" else: operationsLog += \"Building TDK: NO\\n\" if self.doBuildTdk_Debug: operationsLog += \"Building", "self.doBuildConfiguratore, self.doBuildTdk, self.doBuildTdkDeviceDriver, self.doTdkSetup, self.doTdkDeviceDriverSetup, self.versionNumber, self.doBuildTdk_Debug, self.doBuildTdkDeviceDriver_Debug) # self, doOfficial, doOverlord, #", "class MyException(Exception): pass class MyWindowClass(QtGui.QMainWindow, form_class): def __init__(self, parent=None): QtGui.QMainWindow.__init__(self, parent) self.setupUi(self) self.btnStart.clicked.connect(self.validate_builder)", "self.edtVersion.text(): raise MyException('Version number not specified!') if self.isOfficialVersion: FTCMfolderToBeChecked = self.configurator.OfficialExePath + \"\\\\\"", "FTCM and TDK versions!') self.versionNumber = self.edtVersion.text() operationsLog = \"\" self.getOfficialVersionType() self.getOperationsList() if", "self.launch_tdkdevicedriver() self.launch_tdk_setup() self.launch_tdkdevicedriver_setup() QMessageBox.about(self, \"List of Operations\", \"Operations have been finished!\") pass app", "= False def getOperationsList(self): if self.chkBuildTdkDesign.isChecked(): self.doBuildTdkDesign = True else: self.doBuildTdkDesign = False", "= True else: self.isOfficialVersion = False def getOperationsList(self): if self.chkBuildTdkDesign.isChecked(): self.doBuildTdkDesign = True", "if self.doBuildConfiguratore: operationsLog += \"Building CONFIGURATORE OVERLORD: YES\\n\\n\" else: operationsLog += \"Building CONFIGURATORE", "os.path.exists(FTCMfolderToBeChecked): print('Directory %s 
does not exist. It will be created.'% (FTCMfolderToBeChecked)) os.makedirs(FTCMfolderToBeChecked) print('Directory", "self.doBuildTdkDesign: self.chkBuildTdkDesign.toggle() self.doBuildTdkDesign = True self.doBuildOverlord = True else: self.doBuildOverlord = False if", "True self.doBuildOverlord = True else: self.doBuildOverlord = False if self.chkBuildConfiguratore.isChecked(): if not self.doBuildTdkDesign:", "+= \"Building TDKin DESIGN: NO\\n\\n\" if self.doBuildOverlord: operationsLog += \"Building OVERLORD: YES\\n\" else:", "doTdk_Debug, doTdkDeviceDriver_Debug self.main_workflow() else: print(\"Operation has been stopped by the operator!\") except MyException", "FTCMfolderToBeChecked, TDKfolderToBeChecked): try: if not os.path.exists(FTCMfolderToBeChecked): print('Directory %s does not exist. It will", "self.isOfficialVersion: FTCMfolderToBeChecked = self.configurator.OfficialExePath + \"\\\\\" + self.edtVersion.text() TDKfolderToBeChecked = self.configurator.OfficialTdkPath + \"\\\\\"", "['+ self.versionNumber +']!\\n\\n' if self.doBuildTdkDesign: operationsLog += \"Building TDK in DESIGN: YES\\n\\n\" else:", "TDK in DEBUG: YES\\n\\n\" else: operationsLog += \"Building TDK in DEBUG: NO\\n\\n\" if", "self.isOfficialVersion = False self.versionNumber = None self.doBuildTdkDesign = False self.doBuildOverlord = False self.doBuildConfiguratore", "else: self.doBuildTdk_Debug = False if self.chkDoBuildTdkDeviceDriver_Debug.isChecked(): self.doBuildTdkDeviceDriver_Debug = True else: self.doBuildTdkDeviceDriver_Debug = False", "= True else: self.doBuildTdkDesign = False if self.chkBuildOverlord.isChecked(): if not self.doBuildTdkDesign: self.chkBuildTdkDesign.toggle() self.doBuildTdkDesign", "\"Building TDK DEVICE DRIVER SETUP: YES\\n\\n\" else: operationsLog += \"Building TDK DEVICE DRIVER", "of Operations - Exception\", str(e)) return False def launch_vb6(self): self.mainBuilder.buildVb6() pass def launch_tdk_design(self):", "False if 
self.chkBuildConfiguratore.isChecked(): if not self.doBuildTdkDesign: self.chkBuildTdkDesign.toggle() self.doBuildTdkDesign = True self.doBuildConfiguratore = True", "= True else: self.doTdkSetup = False if self.chkDoTdkDeviceDriverSetup.isChecked(): self.doTdkDeviceDriverSetup = True else: self.doTdkDeviceDriverSetup", "created.'% (FTCMfolderToBeChecked)) os.makedirs(FTCMfolderToBeChecked) print('Directory %s has been created.'% (FTCMfolderToBeChecked)) if not os.path.exists(TDKfolderToBeChecked): print('Directory", "NO\\n\\n\" if self.doTdkSetup: operationsLog += \"Building OVERLORD SETUP: YES\\n\\n\" else: operationsLog += \"Building", "OVERLORD, version number is ['+ self.versionNumber +']!\\n\\n' if self.doBuildTdkDesign: operationsLog += \"Building TDK", "NO\\n\" if self.doBuildTdk_Debug: operationsLog += \"Building TDK in DEBUG: YES\\n\\n\" else: operationsLog +=", "DESIGN: NO\\n\\n\" if self.doBuildOverlord: operationsLog += \"Building OVERLORD: YES\\n\" else: operationsLog += \"Building", "def launch_tdk_design(self): self.mainBuilder.buildTdkDesign() pass def launch_tdk_full(self): self.mainBuilder.buildTdkFinal() pass def launch_tdkdevicedriver(self): self.mainBuilder.buildTdkDeviceDriver() pass def", "operationsLog += \"Building OVERLORD SETUP: YES\\n\\n\" else: operationsLog += \"Building OVERLORD SETUP: NO\\n\\n\"", "self.doBuildTdkDeviceDriver = False self.doTdkSetup = False self.doTdkDeviceDriverSetup = False self.doBuildTdk_Debug = False self.doBuildTdkDeviceDriver_Debug", "version number is ['+ self.versionNumber +']!\\n\\n' if self.doBuildTdkDesign: operationsLog += \"Building TDK in", "False self.configurator = BuilderConfiguration() def getOfficialVersionType(self): if self.chkOfficialVersion.isChecked(): self.isOfficialVersion = True else: self.isOfficialVersion", "QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) if reply == QtGui.QMessageBox.Yes: return True else: return False def validate_builder(self):", "checkDirectoriesExistence(self, 
FTCMfolderToBeChecked, TDKfolderToBeChecked): try: if not os.path.exists(FTCMfolderToBeChecked): print('Directory %s does not exist. It", "else: operationsLog += 'Building a NON_OFFICIAL VERSION of OVERLORD, version number is ['+", "operationsLog += \"Building TDK in DESIGN: YES\\n\\n\" else: operationsLog += \"Building TDKin DESIGN:", "to continue using these parameters?\" print(operationsLog) #QMessageBox.about(self, \"List of Operations\", operationsLog) reply =", "if self.chkDoBuildTdk_Debug.isChecked(): self.doBuildTdk_Debug = True else: self.doBuildTdk_Debug = False if self.chkDoBuildTdkDeviceDriver_Debug.isChecked(): self.doBuildTdkDeviceDriver_Debug =", "TDK DEVICE DRIVER SETUP: NO\\n\\n\" operationsLog += \"Would you like to continue using", "\"Building OVERLORD: YES\\n\" else: operationsLog += \"Building OVERLORD: NO\\n\" if self.doBuildConfiguratore: operationsLog +=", "QtGui.QMessageBox.Yes: return True else: return False def validate_builder(self): try: reply = self.resumeOperations() if", "NO\\n\" if self.doBuildTdkDeviceDriver_Debug: operationsLog += \"Building TDK DEVICE DRIVER in DEBUG: YES\\n\\n\" else:", "else: self.isOfficialVersion = False def getOperationsList(self): if self.chkBuildTdkDesign.isChecked(): self.doBuildTdkDesign = True else: self.doBuildTdkDesign", "+ \"\\\\\" + self.edtVersion.text() TDKfolderToBeChecked = self.configurator.OfficialTdkPath + \"\\\\\" + self.edtVersion.text() else: FTCMfolderToBeChecked", "as e: QMessageBox.about(self, \"List of Operations - Exception\", str(e)) return False def launch_vb6(self):", "True else: self.doBuildTdkDeviceDriver_Debug = False def checkDirectoriesExistence(self, FTCMfolderToBeChecked, TDKfolderToBeChecked): try: if not os.path.exists(FTCMfolderToBeChecked):", "+= \"Building TDK DEVICE DRIVER SETUP: NO\\n\\n\" operationsLog += \"Would you like to", "has been created.'% (TDKfolderToBeChecked)) return True except MyException as e: QMessageBox. 
about(self, \"CheckDirectoryExistence", "def launch_tdkdevicedriver_setup(self): pass def main_workflow(self): self.launch_tdk_design() self.launch_vb6() self.launch_tdk_full() self.launch_tdkdevicedriver() self.launch_tdk_setup() self.launch_tdkdevicedriver_setup() QMessageBox.about(self, \"List", "%s has been created.'% (TDKfolderToBeChecked)) return True except MyException as e: QMessageBox. about(self,", "as e: QMessageBox. about(self, \"CheckDirectoryExistence - Exception\", str(e)) return False def resumeOperations(self): if", "\"Building TDK DEVICE DRIVER in DEBUG: NO\\n\\n\" if self.doTdkSetup: operationsLog += \"Building OVERLORD", "DEVICE DRIVER in DEBUG: YES\\n\\n\" else: operationsLog += \"Building TDK DEVICE DRIVER in", "\"CheckDirectoryExistence - Exception\", str(e)) return False def resumeOperations(self): if not self.edtVersion.text(): raise MyException('Version", "print(operationsLog) #QMessageBox.about(self, \"List of Operations\", operationsLog) reply = QMessageBox.question(self, \"List of Operations\", operationsLog,", "self.doTdkDeviceDriverSetup = False self.doBuildTdk_Debug = False self.doBuildTdkDeviceDriver_Debug = False self.configurator = BuilderConfiguration() def", "self.configurator.NonOfficialExePath + \"\\\\\" + self.edtVersion.text() TDKfolderToBeChecked = self.configurator.NonOfficialTdkPath + \"\\\\\" + self.edtVersion.text() if", "PyQt4.QtGui import QMessageBox from main import * from builder_configuration import BuilderConfiguration form_class =", "not self.doBuildTdkDesign: self.chkBuildTdkDesign.toggle() self.doBuildTdkDesign = True self.doBuildConfiguratore = True else: self.doBuildConfiguratore = False", "DEVICE DRIVER: YES\\n\" else: operationsLog += \"Building TDK DEVICE DRIVER: NO\\n\" if self.doBuildTdkDeviceDriver_Debug:", "self.doBuildTdkDeviceDriver, self.doTdkSetup, self.doTdkDeviceDriverSetup, self.versionNumber, self.doBuildTdk_Debug, self.doBuildTdkDeviceDriver_Debug) # self, doOfficial, doOverlord, # 
doConfiguratore, doTdk,", "if not self.edtVersion.text(): raise MyException('Version number not specified!') if self.isOfficialVersion: FTCMfolderToBeChecked = self.configurator.OfficialExePath", "return True except MyException as e: QMessageBox. about(self, \"CheckDirectoryExistence - Exception\", str(e)) return", "False self.doBuildConfiguratore = False self.doBuildTdk = False self.doBuildTdkDeviceDriver = False self.doTdkSetup = False", "= self.configurator.OfficialExePath + \"\\\\\" + self.edtVersion.text() TDKfolderToBeChecked = self.configurator.OfficialTdkPath + \"\\\\\" + self.edtVersion.text()", "else: operationsLog += \"Building TDK DEVICE DRIVER SETUP: NO\\n\\n\" operationsLog += \"Would you", "self.versionNumber = self.edtVersion.text() operationsLog = \"\" self.getOfficialVersionType() self.getOperationsList() if self.isOfficialVersion: operationsLog += 'Building", "SETUP: YES\\n\\n\" else: operationsLog += \"Building TDK DEVICE DRIVER SETUP: NO\\n\\n\" operationsLog +=", "# doConfiguratore, doTdk, doTdkDeviceDriver, # doTdkSetup, doTdkDeviceDriverSetup, versionNumber, # doTdk_Debug, doTdkDeviceDriver_Debug self.main_workflow() else:", "if self.chkBuildConfiguratore.isChecked(): if not self.doBuildTdkDesign: self.chkBuildTdkDesign.toggle() self.doBuildTdkDesign = True self.doBuildConfiguratore = True else:", "TDK: NO\\n\" if self.doBuildTdk_Debug: operationsLog += \"Building TDK in DEBUG: YES\\n\\n\" else: operationsLog", "\"Building TDK DEVICE DRIVER SETUP: NO\\n\\n\" operationsLog += \"Would you like to continue", "False def resumeOperations(self): if not self.edtVersion.text(): raise MyException('Version number not specified!') if self.isOfficialVersion:", "True else: self.doBuildTdkDesign = False if self.chkBuildOverlord.isChecked(): if not self.doBuildTdkDesign: self.chkBuildTdkDesign.toggle() self.doBuildTdkDesign =", "operationsLog += \"Building CONFIGURATORE OVERLORD: YES\\n\\n\" else: operationsLog += \"Building CONFIGURATORE OVERLORD: 
NO\\n\\n\"", "if self.doBuildTdkDeviceDriver_Debug: operationsLog += \"Building TDK DEVICE DRIVER in DEBUG: YES\\n\\n\" else: operationsLog", "main import * from builder_configuration import BuilderConfiguration form_class = uic.loadUiType(\"main-interface.ui\")[0] # Load the", "\"Building OVERLORD SETUP: YES\\n\\n\" else: operationsLog += \"Building OVERLORD SETUP: NO\\n\\n\" if self.doTdkDeviceDriverSetup:", "launch_vb6(self): self.mainBuilder.buildVb6() pass def launch_tdk_design(self): self.mainBuilder.buildTdkDesign() pass def launch_tdk_full(self): self.mainBuilder.buildTdkFinal() pass def launch_tdkdevicedriver(self):", "self.doBuildTdkDeviceDriver_Debug = False self.configurator = BuilderConfiguration() def getOfficialVersionType(self): if self.chkOfficialVersion.isChecked(): self.isOfficialVersion = True", "self.doBuildOverlord: operationsLog += \"Building OVERLORD: YES\\n\" else: operationsLog += \"Building OVERLORD: NO\\n\" if", "uic.loadUiType(\"main-interface.ui\")[0] # Load the UI class MyException(Exception): pass class MyWindowClass(QtGui.QMainWindow, form_class): def __init__(self,", "operationsLog += \"Building TDK DEVICE DRIVER SETUP: YES\\n\\n\" else: operationsLog += \"Building TDK", "pass def main_workflow(self): self.launch_tdk_design() self.launch_vb6() self.launch_tdk_full() self.launch_tdkdevicedriver() self.launch_tdk_setup() self.launch_tdkdevicedriver_setup() QMessageBox.about(self, \"List of Operations\",", "True else: self.doBuildConfiguratore = False if self.chkBuildTdk.isChecked(): self.doBuildTdk = True else: self.doBuildTdk =", "self.isOfficialVersion = True else: self.isOfficialVersion = False def getOperationsList(self): if self.chkBuildTdkDesign.isChecked(): self.doBuildTdkDesign =", "you like to continue using these parameters?\" print(operationsLog) #QMessageBox.about(self, \"List of Operations\", operationsLog)", "else: return False def validate_builder(self): try: reply = self.resumeOperations() if reply: print(\"Operations 
will", "+= \"Building TDK DEVICE DRIVER in DEBUG: YES\\n\\n\" else: operationsLog += \"Building TDK", "+= \"Building TDK in DEBUG: YES\\n\\n\" else: operationsLog += \"Building TDK in DEBUG:", "operationsLog += \"Building OVERLORD: NO\\n\" if self.doBuildConfiguratore: operationsLog += \"Building CONFIGURATORE OVERLORD: YES\\n\\n\"", "from main import * from builder_configuration import BuilderConfiguration form_class = uic.loadUiType(\"main-interface.ui\")[0] # Load", "= True else: self.doBuildOverlord = False if self.chkBuildConfiguratore.isChecked(): if not self.doBuildTdkDesign: self.chkBuildTdkDesign.toggle() self.doBuildTdkDesign", "= self.resumeOperations() if reply: print(\"Operations will be executed\") self.mainBuilder = MainBuilder( self.isOfficialVersion, self.doBuildTdkDesign,", "SETUP: NO\\n\\n\" operationsLog += \"Would you like to continue using these parameters?\" print(operationsLog)", "print('Directory %s has been created.'% (FTCMfolderToBeChecked)) if not os.path.exists(TDKfolderToBeChecked): print('Directory %s does not", "been created.'% (TDKfolderToBeChecked)) return True except MyException as e: QMessageBox. 
about(self, \"CheckDirectoryExistence -", "self.isOfficialVersion: operationsLog += 'Building an OFFICIAL VERSION of OVERLORD, version number is ['+", "if self.chkBuildOverlord.isChecked(): if not self.doBuildTdkDesign: self.chkBuildTdkDesign.toggle() self.doBuildTdkDesign = True self.doBuildOverlord = True else:", "+ self.edtVersion.text() else: FTCMfolderToBeChecked = self.configurator.NonOfficialExePath + \"\\\\\" + self.edtVersion.text() TDKfolderToBeChecked = self.configurator.NonOfficialTdkPath", "QMessageBox.about(self, \"List of Operations\", \"Operations have been finished!\") pass app = QtGui.QApplication(sys.argv) myWindow", "False self.doBuildTdk_Debug = False self.doBuildTdkDeviceDriver_Debug = False self.configurator = BuilderConfiguration() def getOfficialVersionType(self): if", "self.doBuildTdkDesign = True else: self.doBuildTdkDesign = False if self.chkBuildOverlord.isChecked(): if not self.doBuildTdkDesign: self.chkBuildTdkDesign.toggle()", "self.doBuildTdkDeviceDriver_Debug) # self, doOfficial, doOverlord, # doConfiguratore, doTdk, doTdkDeviceDriver, # doTdkSetup, doTdkDeviceDriverSetup, versionNumber,", "self.mainBuilder.buildVb6() pass def launch_tdk_design(self): self.mainBuilder.buildTdkDesign() pass def launch_tdk_full(self): self.mainBuilder.buildTdkFinal() pass def launch_tdkdevicedriver(self): self.mainBuilder.buildTdkDeviceDriver()", "self.chkBuildTdkDesign.toggle() self.doBuildTdkDesign = True self.doBuildConfiguratore = True else: self.doBuildConfiguratore = False if self.chkBuildTdk.isChecked():", "self.doBuildTdkDeviceDriver = True else: self.doBuildTdkDeviceDriver = False if self.chkDoTdkSetup.isChecked(): self.doTdkSetup = True else:", "return True else: return False def validate_builder(self): try: reply = self.resumeOperations() if reply:", "versionNumber, # doTdk_Debug, doTdkDeviceDriver_Debug self.main_workflow() else: print(\"Operation has been stopped by the operator!\")", "TDK DEVICE DRIVER: YES\\n\" else: operationsLog += 
\"Building TDK DEVICE DRIVER: NO\\n\" if", "str(e)) return False def launch_vb6(self): self.mainBuilder.buildVb6() pass def launch_tdk_design(self): self.mainBuilder.buildTdkDesign() pass def launch_tdk_full(self):", "self.configurator.OfficialTdkPath + \"\\\\\" + self.edtVersion.text() else: FTCMfolderToBeChecked = self.configurator.NonOfficialExePath + \"\\\\\" + self.edtVersion.text()", "YES\\n\\n\" else: operationsLog += \"Building TDKin DESIGN: NO\\n\\n\" if self.doBuildOverlord: operationsLog += \"Building", "= False self.versionNumber = None self.doBuildTdkDesign = False self.doBuildOverlord = False self.doBuildConfiguratore =", "TDK versions!') self.versionNumber = self.edtVersion.text() operationsLog = \"\" self.getOfficialVersionType() self.getOperationsList() if self.isOfficialVersion: operationsLog", "operationsLog, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) if reply == QtGui.QMessageBox.Yes: return True else: return False def", "\"List of Operations\", \"Operations have been finished!\") pass app = QtGui.QApplication(sys.argv) myWindow =", "\"List of Operations\", operationsLog, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) if reply == QtGui.QMessageBox.Yes: return True else:", "been created.'% (FTCMfolderToBeChecked)) if not os.path.exists(TDKfolderToBeChecked): print('Directory %s does not exist. 
It will", "creating folders for FTCM and TDK versions!') self.versionNumber = self.edtVersion.text() operationsLog = \"\"", "\"Building TDK in DEBUG: YES\\n\\n\" else: operationsLog += \"Building TDK in DEBUG: NO\\n\\n\"", "builder_configuration import BuilderConfiguration form_class = uic.loadUiType(\"main-interface.ui\")[0] # Load the UI class MyException(Exception): pass", "\"Building TDK DEVICE DRIVER: YES\\n\" else: operationsLog += \"Building TDK DEVICE DRIVER: NO\\n\"", "if not self.doBuildTdkDesign: self.chkBuildTdkDesign.toggle() self.doBuildTdkDesign = True self.doBuildConfiguratore = True else: self.doBuildConfiguratore =", "launch_tdk_design(self): self.mainBuilder.buildTdkDesign() pass def launch_tdk_full(self): self.mainBuilder.buildTdkFinal() pass def launch_tdkdevicedriver(self): self.mainBuilder.buildTdkDeviceDriver() pass def launch_tdk_setup(self):", "been stopped by the operator!\") except MyException as e: QMessageBox.about(self, \"List of Operations", "\"Building TDK DEVICE DRIVER in DEBUG: YES\\n\\n\" else: operationsLog += \"Building TDK DEVICE", "NON_OFFICIAL VERSION of OVERLORD, version number is ['+ self.versionNumber +']!\\n\\n' if self.doBuildTdkDesign: operationsLog", "NO\\n\" if self.doBuildConfiguratore: operationsLog += \"Building CONFIGURATORE OVERLORD: YES\\n\\n\" else: operationsLog += \"Building", "= \"\" self.getOfficialVersionType() self.getOperationsList() if self.isOfficialVersion: operationsLog += 'Building an OFFICIAL VERSION of", "sys, os from PyQt4 import QtGui, QtCore, uic from PyQt4.QtGui import QMessageBox from", "if reply: print(\"Operations will be executed\") self.mainBuilder = MainBuilder( self.isOfficialVersion, self.doBuildTdkDesign, self.doBuildOverlord, self.doBuildConfiguratore,", "YES\\n\\n\" else: operationsLog += \"Building TDK DEVICE DRIVER SETUP: NO\\n\\n\" operationsLog += \"Would", "print('Directory %s does not exist. 
It will be created.'% (FTCMfolderToBeChecked)) os.makedirs(FTCMfolderToBeChecked) print('Directory %s", "+ self.edtVersion.text() TDKfolderToBeChecked = self.configurator.OfficialTdkPath + \"\\\\\" + self.edtVersion.text() else: FTCMfolderToBeChecked = self.configurator.NonOfficialExePath", "QtCore, uic from PyQt4.QtGui import QMessageBox from main import * from builder_configuration import", "True self.doBuildConfiguratore = True else: self.doBuildConfiguratore = False if self.chkBuildTdk.isChecked(): self.doBuildTdk = True", "MyException('Version number not specified!') if self.isOfficialVersion: FTCMfolderToBeChecked = self.configurator.OfficialExePath + \"\\\\\" + self.edtVersion.text()", "\"\\\\\" + self.edtVersion.text() TDKfolderToBeChecked = self.configurator.OfficialTdkPath + \"\\\\\" + self.edtVersion.text() else: FTCMfolderToBeChecked =", "self.edtVersion.text() TDKfolderToBeChecked = self.configurator.NonOfficialTdkPath + \"\\\\\" + self.edtVersion.text() if not self.checkDirectoriesExistence(FTCMfolderToBeChecked, TDKfolderToBeChecked): raise", "if self.doBuildTdk: operationsLog += \"Building TDK: YES\\n\" else: operationsLog += \"Building TDK: NO\\n\"", "in DEBUG: NO\\n\\n\" if self.doTdkSetup: operationsLog += \"Building OVERLORD SETUP: YES\\n\\n\" else: operationsLog", "operationsLog += \"Building OVERLORD SETUP: NO\\n\\n\" if self.doTdkDeviceDriverSetup: operationsLog += \"Building TDK DEVICE", "operationsLog += \"Building OVERLORD: YES\\n\" else: operationsLog += \"Building OVERLORD: NO\\n\" if self.doBuildConfiguratore:", "None self.doBuildTdkDesign = False self.doBuildOverlord = False self.doBuildConfiguratore = False self.doBuildTdk = False", "self, doOfficial, doOverlord, # doConfiguratore, doTdk, doTdkDeviceDriver, # doTdkSetup, doTdkDeviceDriverSetup, versionNumber, # doTdk_Debug,", "exist. 
It will be created.'% (FTCMfolderToBeChecked)) os.makedirs(FTCMfolderToBeChecked) print('Directory %s has been created.'% (FTCMfolderToBeChecked))", "return False def validate_builder(self): try: reply = self.resumeOperations() if reply: print(\"Operations will be", "False def validate_builder(self): try: reply = self.resumeOperations() if reply: print(\"Operations will be executed\")", "while creating folders for FTCM and TDK versions!') self.versionNumber = self.edtVersion.text() operationsLog =", "= True self.doBuildConfiguratore = True else: self.doBuildConfiguratore = False if self.chkBuildTdk.isChecked(): self.doBuildTdk =", "(TDKfolderToBeChecked)) os.makedirs(TDKfolderToBeChecked) print('Directory %s has been created.'% (TDKfolderToBeChecked)) return True except MyException as", "the UI class MyException(Exception): pass class MyWindowClass(QtGui.QMainWindow, form_class): def __init__(self, parent=None): QtGui.QMainWindow.__init__(self, parent)", "self.chkOfficialVersion.isChecked(): self.isOfficialVersion = True else: self.isOfficialVersion = False def getOperationsList(self): if self.chkBuildTdkDesign.isChecked(): self.doBuildTdkDesign", "stopped by the operator!\") except MyException as e: QMessageBox.about(self, \"List of Operations -", "will be created.'% (FTCMfolderToBeChecked)) os.makedirs(FTCMfolderToBeChecked) print('Directory %s has been created.'% (FTCMfolderToBeChecked)) if not", "self.doBuildTdkDesign, self.doBuildOverlord, self.doBuildConfiguratore, self.doBuildTdk, self.doBuildTdkDeviceDriver, self.doTdkSetup, self.doTdkDeviceDriverSetup, self.versionNumber, self.doBuildTdk_Debug, self.doBuildTdkDeviceDriver_Debug) # self, doOfficial,", "else: self.doBuildTdkDeviceDriver = False if self.chkDoTdkSetup.isChecked(): self.doTdkSetup = True else: self.doTdkSetup = False", "will be created.'% (TDKfolderToBeChecked)) os.makedirs(TDKfolderToBeChecked) print('Directory %s has been created.'% (TDKfolderToBeChecked)) return True", "str(e)) return False 
def resumeOperations(self): if not self.edtVersion.text(): raise MyException('Version number not specified!')", "is ['+ self.versionNumber +']!\\n\\n' if self.doBuildTdkDesign: operationsLog += \"Building TDK in DESIGN: YES\\n\\n\"", "operationsLog += \"Building TDKin DESIGN: NO\\n\\n\" if self.doBuildOverlord: operationsLog += \"Building OVERLORD: YES\\n\"", "doTdkDeviceDriver_Debug self.main_workflow() else: print(\"Operation has been stopped by the operator!\") except MyException as", "+= \"Building OVERLORD: NO\\n\" if self.doBuildConfiguratore: operationsLog += \"Building CONFIGURATORE OVERLORD: YES\\n\\n\" else:", "for FTCM and TDK versions!') self.versionNumber = self.edtVersion.text() operationsLog = \"\" self.getOfficialVersionType() self.getOperationsList()", "DRIVER SETUP: NO\\n\\n\" operationsLog += \"Would you like to continue using these parameters?\"", "if self.isOfficialVersion: operationsLog += 'Building an OFFICIAL VERSION of OVERLORD, version number is", "DRIVER: NO\\n\" if self.doBuildTdkDeviceDriver_Debug: operationsLog += \"Building TDK DEVICE DRIVER in DEBUG: YES\\n\\n\"", "\"\\\\\" + self.edtVersion.text() if not self.checkDirectoriesExistence(FTCMfolderToBeChecked, TDKfolderToBeChecked): raise MyException('Error while creating folders for", "self.doTdkDeviceDriverSetup = True else: self.doTdkDeviceDriverSetup = False if self.chkDoBuildTdk_Debug.isChecked(): self.doBuildTdk_Debug = True else:", "MyException('Error while creating folders for FTCM and TDK versions!') self.versionNumber = self.edtVersion.text() operationsLog", "= False self.doBuildTdk = False self.doBuildTdkDeviceDriver = False self.doTdkSetup = False self.doTdkDeviceDriverSetup =", "if not self.checkDirectoriesExistence(FTCMfolderToBeChecked, TDKfolderToBeChecked): raise MyException('Error while creating folders for FTCM and TDK", "if self.doBuildTdk_Debug: operationsLog += \"Building TDK in DEBUG: YES\\n\\n\" else: operationsLog += \"Building", "QMessageBox. 
about(self, \"CheckDirectoryExistence - Exception\", str(e)) return False def resumeOperations(self): if not self.edtVersion.text():", "self.doBuildOverlord = False if self.chkBuildConfiguratore.isChecked(): if not self.doBuildTdkDesign: self.chkBuildTdkDesign.toggle() self.doBuildTdkDesign = True self.doBuildConfiguratore", "main_workflow(self): self.launch_tdk_design() self.launch_vb6() self.launch_tdk_full() self.launch_tdkdevicedriver() self.launch_tdk_setup() self.launch_tdkdevicedriver_setup() QMessageBox.about(self, \"List of Operations\", \"Operations have", "launch_tdk_setup(self): self.mainBuilder.buildTdkSetup() pass def launch_tdkdevicedriver_setup(self): pass def main_workflow(self): self.launch_tdk_design() self.launch_vb6() self.launch_tdk_full() self.launch_tdkdevicedriver() self.launch_tdk_setup()", "__init__(self, parent=None): QtGui.QMainWindow.__init__(self, parent) self.setupUi(self) self.btnStart.clicked.connect(self.validate_builder) self.isOfficialVersion = False self.versionNumber = None self.doBuildTdkDesign", "not self.doBuildTdkDesign: self.chkBuildTdkDesign.toggle() self.doBuildTdkDesign = True self.doBuildOverlord = True else: self.doBuildOverlord = False", "operationsLog += \"Building TDK DEVICE DRIVER: NO\\n\" if self.doBuildTdkDeviceDriver_Debug: operationsLog += \"Building TDK", "if self.chkDoTdkSetup.isChecked(): self.doTdkSetup = True else: self.doTdkSetup = False if self.chkDoTdkDeviceDriverSetup.isChecked(): self.doTdkDeviceDriverSetup =", "TDK DEVICE DRIVER in DEBUG: YES\\n\\n\" else: operationsLog += \"Building TDK DEVICE DRIVER", "validate_builder(self): try: reply = self.resumeOperations() if reply: print(\"Operations will be executed\") self.mainBuilder =", "= True else: self.doBuildTdkDeviceDriver = False if self.chkDoTdkSetup.isChecked(): self.doTdkSetup = True else: self.doTdkSetup", "+= 'Building a NON_OFFICIAL VERSION of OVERLORD, version number is ['+ self.versionNumber +']!\\n\\n'", "self.doTdkSetup = False 
self.doTdkDeviceDriverSetup = False self.doBuildTdk_Debug = False self.doBuildTdkDeviceDriver_Debug = False self.configurator", "self.doBuildConfiguratore = False if self.chkBuildTdk.isChecked(): self.doBuildTdk = True else: self.doBuildTdk = False if", "self.doBuildTdkDeviceDriver_Debug: operationsLog += \"Building TDK DEVICE DRIVER in DEBUG: YES\\n\\n\" else: operationsLog +=", "self.configurator = BuilderConfiguration() def getOfficialVersionType(self): if self.chkOfficialVersion.isChecked(): self.isOfficialVersion = True else: self.isOfficialVersion =", "not exist. It will be created.'% (TDKfolderToBeChecked)) os.makedirs(TDKfolderToBeChecked) print('Directory %s has been created.'%", "DRIVER in DEBUG: NO\\n\\n\" if self.doTdkSetup: operationsLog += \"Building OVERLORD SETUP: YES\\n\\n\" else:", "DEBUG: NO\\n\\n\" if self.doTdkSetup: operationsLog += \"Building OVERLORD SETUP: YES\\n\\n\" else: operationsLog +=", "parent) self.setupUi(self) self.btnStart.clicked.connect(self.validate_builder) self.isOfficialVersion = False self.versionNumber = None self.doBuildTdkDesign = False self.doBuildOverlord", "= True self.doBuildOverlord = True else: self.doBuildOverlord = False if self.chkBuildConfiguratore.isChecked(): if not", "self.btnStart.clicked.connect(self.validate_builder) self.isOfficialVersion = False self.versionNumber = None self.doBuildTdkDesign = False self.doBuildOverlord = False", "self.mainBuilder = MainBuilder( self.isOfficialVersion, self.doBuildTdkDesign, self.doBuildOverlord, self.doBuildConfiguratore, self.doBuildTdk, self.doBuildTdkDeviceDriver, self.doTdkSetup, self.doTdkDeviceDriverSetup, self.versionNumber, self.doBuildTdk_Debug,", "YES\\n\\n\" else: operationsLog += \"Building TDK in DEBUG: NO\\n\\n\" if self.doBuildTdkDeviceDriver: operationsLog +=", "Load the UI class MyException(Exception): pass class MyWindowClass(QtGui.QMainWindow, form_class): def __init__(self, parent=None): QtGui.QMainWindow.__init__(self,", "\"Building TDK: NO\\n\" 
if self.doBuildTdk_Debug: operationsLog += \"Building TDK in DEBUG: YES\\n\\n\" else:", "self.doBuildConfiguratore = False self.doBuildTdk = False self.doBuildTdkDeviceDriver = False self.doTdkSetup = False self.doTdkDeviceDriverSetup", "else: self.doBuildTdkDeviceDriver_Debug = False def checkDirectoriesExistence(self, FTCMfolderToBeChecked, TDKfolderToBeChecked): try: if not os.path.exists(FTCMfolderToBeChecked): print('Directory", "operationsLog += \"Building TDK in DEBUG: YES\\n\\n\" else: operationsLog += \"Building TDK in", "False self.doBuildTdk = False self.doBuildTdkDeviceDriver = False self.doTdkSetup = False self.doTdkDeviceDriverSetup = False", "version number is ['+ self.versionNumber +']\\n\\n' else: operationsLog += 'Building a NON_OFFICIAL VERSION", "if self.doBuildOverlord: operationsLog += \"Building OVERLORD: YES\\n\" else: operationsLog += \"Building OVERLORD: NO\\n\"", "getOperationsList(self): if self.chkBuildTdkDesign.isChecked(): self.doBuildTdkDesign = True else: self.doBuildTdkDesign = False if self.chkBuildOverlord.isChecked(): if", "in DEBUG: NO\\n\\n\" if self.doBuildTdkDeviceDriver: operationsLog += \"Building TDK DEVICE DRIVER: YES\\n\" else:", "False if self.chkBuildTdk.isChecked(): self.doBuildTdk = True else: self.doBuildTdk = False if self.chkBuildTdkDeviceDriver.isChecked(): self.doBuildTdkDeviceDriver", "pass def launch_tdkdevicedriver_setup(self): pass def main_workflow(self): self.launch_tdk_design() self.launch_vb6() self.launch_tdk_full() self.launch_tdkdevicedriver() self.launch_tdk_setup() self.launch_tdkdevicedriver_setup() QMessageBox.about(self,", "except MyException as e: QMessageBox. about(self, \"CheckDirectoryExistence - Exception\", str(e)) return False def", "does not exist. 
It will be created.'% (TDKfolderToBeChecked)) os.makedirs(TDKfolderToBeChecked) print('Directory %s has been", "SETUP: YES\\n\\n\" else: operationsLog += \"Building OVERLORD SETUP: NO\\n\\n\" if self.doTdkDeviceDriverSetup: operationsLog +=", "self.doBuildTdk: operationsLog += \"Building TDK: YES\\n\" else: operationsLog += \"Building TDK: NO\\n\" if", "Temperature-conversion program using PyQt import sys, os from PyQt4 import QtGui, QtCore, uic", "pass def launch_tdk_setup(self): self.mainBuilder.buildTdkSetup() pass def launch_tdkdevicedriver_setup(self): pass def main_workflow(self): self.launch_tdk_design() self.launch_vb6() self.launch_tdk_full()", "self.doTdkDeviceDriverSetup, self.versionNumber, self.doBuildTdk_Debug, self.doBuildTdkDeviceDriver_Debug) # self, doOfficial, doOverlord, # doConfiguratore, doTdk, doTdkDeviceDriver, #", "self.doBuildTdkDesign: operationsLog += \"Building TDK in DESIGN: YES\\n\\n\" else: operationsLog += \"Building TDKin", "operationsLog += \"Building CONFIGURATORE OVERLORD: NO\\n\\n\" if self.doBuildTdk: operationsLog += \"Building TDK: YES\\n\"", "\"\" self.getOfficialVersionType() self.getOperationsList() if self.isOfficialVersion: operationsLog += 'Building an OFFICIAL VERSION of OVERLORD,", "self.doBuildConfiguratore: operationsLog += \"Building CONFIGURATORE OVERLORD: YES\\n\\n\" else: operationsLog += \"Building CONFIGURATORE OVERLORD:", "exist. 
It will be created.'% (TDKfolderToBeChecked)) os.makedirs(TDKfolderToBeChecked) print('Directory %s has been created.'% (TDKfolderToBeChecked))", "launch_tdk_full(self): self.mainBuilder.buildTdkFinal() pass def launch_tdkdevicedriver(self): self.mainBuilder.buildTdkDeviceDriver() pass def launch_tdk_setup(self): self.mainBuilder.buildTdkSetup() pass def launch_tdkdevicedriver_setup(self):", "self.doBuildTdkDesign: self.chkBuildTdkDesign.toggle() self.doBuildTdkDesign = True self.doBuildConfiguratore = True else: self.doBuildConfiguratore = False if", "not os.path.exists(TDKfolderToBeChecked): print('Directory %s does not exist. It will be created.'% (TDKfolderToBeChecked)) os.makedirs(TDKfolderToBeChecked)", "else: operationsLog += \"Building OVERLORD SETUP: NO\\n\\n\" if self.doTdkDeviceDriverSetup: operationsLog += \"Building TDK", "self.main_workflow() else: print(\"Operation has been stopped by the operator!\") except MyException as e:", "else: operationsLog += \"Building TDK DEVICE DRIVER: NO\\n\" if self.doBuildTdkDeviceDriver_Debug: operationsLog += \"Building", "if not self.doBuildTdkDesign: self.chkBuildTdkDesign.toggle() self.doBuildTdkDesign = True self.doBuildOverlord = True else: self.doBuildOverlord =", "+= \"Building CONFIGURATORE OVERLORD: NO\\n\\n\" if self.doBuildTdk: operationsLog += \"Building TDK: YES\\n\" else:", "os.makedirs(FTCMfolderToBeChecked) print('Directory %s has been created.'% (FTCMfolderToBeChecked)) if not os.path.exists(TDKfolderToBeChecked): print('Directory %s does", "= False if self.chkBuildTdkDeviceDriver.isChecked(): self.doBuildTdkDeviceDriver = True else: self.doBuildTdkDeviceDriver = False if self.chkDoTdkSetup.isChecked():", "PyQt import sys, os from PyQt4 import QtGui, QtCore, uic from PyQt4.QtGui import", "self.doBuildTdkDeviceDriver_Debug = False def checkDirectoriesExistence(self, FTCMfolderToBeChecked, TDKfolderToBeChecked): try: if not os.path.exists(FTCMfolderToBeChecked): print('Directory %s", "if reply == 
QtGui.QMessageBox.Yes: return True else: return False def validate_builder(self): try: reply", "\"Building TDKin DESIGN: NO\\n\\n\" if self.doBuildOverlord: operationsLog += \"Building OVERLORD: YES\\n\" else: operationsLog", "DEBUG: NO\\n\\n\" if self.doBuildTdkDeviceDriver: operationsLog += \"Building TDK DEVICE DRIVER: YES\\n\" else: operationsLog", "FTCMfolderToBeChecked = self.configurator.NonOfficialExePath + \"\\\\\" + self.edtVersion.text() TDKfolderToBeChecked = self.configurator.NonOfficialTdkPath + \"\\\\\" +", "%s does not exist. It will be created.'% (TDKfolderToBeChecked)) os.makedirs(TDKfolderToBeChecked) print('Directory %s has", "program using PyQt import sys, os from PyQt4 import QtGui, QtCore, uic from", "be created.'% (TDKfolderToBeChecked)) os.makedirs(TDKfolderToBeChecked) print('Directory %s has been created.'% (TDKfolderToBeChecked)) return True except", "else: self.doBuildTdk = False if self.chkBuildTdkDeviceDriver.isChecked(): self.doBuildTdkDeviceDriver = True else: self.doBuildTdkDeviceDriver = False", "operationsLog += \"Building TDK DEVICE DRIVER: YES\\n\" else: operationsLog += \"Building TDK DEVICE", "launch_tdkdevicedriver_setup(self): pass def main_workflow(self): self.launch_tdk_design() self.launch_vb6() self.launch_tdk_full() self.launch_tdkdevicedriver() self.launch_tdk_setup() self.launch_tdkdevicedriver_setup() QMessageBox.about(self, \"List of", "these parameters?\" print(operationsLog) #QMessageBox.about(self, \"List of Operations\", operationsLog) reply = QMessageBox.question(self, \"List of", "self.doBuildOverlord = True else: self.doBuildOverlord = False if self.chkBuildConfiguratore.isChecked(): if not self.doBuildTdkDesign: self.chkBuildTdkDesign.toggle()", "MainBuilder( self.isOfficialVersion, self.doBuildTdkDesign, self.doBuildOverlord, self.doBuildConfiguratore, self.doBuildTdk, self.doBuildTdkDeviceDriver, self.doTdkSetup, self.doTdkDeviceDriverSetup, self.versionNumber, self.doBuildTdk_Debug, 
self.doBuildTdkDeviceDriver_Debug) #", "operationsLog += \"Would you like to continue using these parameters?\" print(operationsLog) #QMessageBox.about(self, \"List", "+= \"Building TDK DEVICE DRIVER in DEBUG: NO\\n\\n\" if self.doTdkSetup: operationsLog += \"Building", "= True else: self.doBuildTdk_Debug = False if self.chkDoBuildTdkDeviceDriver_Debug.isChecked(): self.doBuildTdkDeviceDriver_Debug = True else: self.doBuildTdkDeviceDriver_Debug", "be executed\") self.mainBuilder = MainBuilder( self.isOfficialVersion, self.doBuildTdkDesign, self.doBuildOverlord, self.doBuildConfiguratore, self.doBuildTdk, self.doBuildTdkDeviceDriver, self.doTdkSetup, self.doTdkDeviceDriverSetup,", "- Exception\", str(e)) return False def resumeOperations(self): if not self.edtVersion.text(): raise MyException('Version number", "self.configurator.OfficialExePath + \"\\\\\" + self.edtVersion.text() TDKfolderToBeChecked = self.configurator.OfficialTdkPath + \"\\\\\" + self.edtVersion.text() else:", "self.versionNumber = None self.doBuildTdkDesign = False self.doBuildOverlord = False self.doBuildConfiguratore = False self.doBuildTdk", "\"List of Operations - Exception\", str(e)) return False def launch_vb6(self): self.mainBuilder.buildVb6() pass def", "def main_workflow(self): self.launch_tdk_design() self.launch_vb6() self.launch_tdk_full() self.launch_tdkdevicedriver() self.launch_tdk_setup() self.launch_tdkdevicedriver_setup() QMessageBox.about(self, \"List of Operations\", \"Operations", "has been stopped by the operator!\") except MyException as e: QMessageBox.about(self, \"List of", "self.doBuildTdkDesign = False if self.chkBuildOverlord.isChecked(): if not self.doBuildTdkDesign: self.chkBuildTdkDesign.toggle() self.doBuildTdkDesign = True self.doBuildOverlord", "if self.doTdkDeviceDriverSetup: operationsLog += \"Building TDK DEVICE DRIVER SETUP: YES\\n\\n\" else: operationsLog +=", "launch_tdkdevicedriver(self): self.mainBuilder.buildTdkDeviceDriver() pass def 
launch_tdk_setup(self): self.mainBuilder.buildTdkSetup() pass def launch_tdkdevicedriver_setup(self): pass def main_workflow(self): self.launch_tdk_design()", "= False def checkDirectoriesExistence(self, FTCMfolderToBeChecked, TDKfolderToBeChecked): try: if not os.path.exists(FTCMfolderToBeChecked): print('Directory %s does", "NO\\n\\n\" if self.doBuildTdk: operationsLog += \"Building TDK: YES\\n\" else: operationsLog += \"Building TDK:", "Operations\", operationsLog) reply = QMessageBox.question(self, \"List of Operations\", operationsLog, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) if reply", "else: operationsLog += \"Building TDK DEVICE DRIVER in DEBUG: NO\\n\\n\" if self.doTdkSetup: operationsLog", "BuilderConfiguration form_class = uic.loadUiType(\"main-interface.ui\")[0] # Load the UI class MyException(Exception): pass class MyWindowClass(QtGui.QMainWindow,", "in DESIGN: YES\\n\\n\" else: operationsLog += \"Building TDKin DESIGN: NO\\n\\n\" if self.doBuildOverlord: operationsLog", "in DEBUG: YES\\n\\n\" else: operationsLog += \"Building TDK in DEBUG: NO\\n\\n\" if self.doBuildTdkDeviceDriver:", "operationsLog += \"Building TDK DEVICE DRIVER SETUP: NO\\n\\n\" operationsLog += \"Would you like", "True else: self.doBuildTdkDeviceDriver = False if self.chkDoTdkSetup.isChecked(): self.doTdkSetup = True else: self.doTdkSetup =", "TDK in DEBUG: NO\\n\\n\" if self.doBuildTdkDeviceDriver: operationsLog += \"Building TDK DEVICE DRIVER: YES\\n\"", "os from PyQt4 import QtGui, QtCore, uic from PyQt4.QtGui import QMessageBox from main", "NO\\n\\n\" if self.doBuildTdkDeviceDriver: operationsLog += \"Building TDK DEVICE DRIVER: YES\\n\" else: operationsLog +=", "self.mainBuilder.buildTdkSetup() pass def launch_tdkdevicedriver_setup(self): pass def main_workflow(self): self.launch_tdk_design() self.launch_vb6() self.launch_tdk_full() self.launch_tdkdevicedriver() self.launch_tdk_setup() self.launch_tdkdevicedriver_setup()", "self.edtVersion.text() else: 
FTCMfolderToBeChecked = self.configurator.NonOfficialExePath + \"\\\\\" + self.edtVersion.text() TDKfolderToBeChecked = self.configurator.NonOfficialTdkPath +", "class MyWindowClass(QtGui.QMainWindow, form_class): def __init__(self, parent=None): QtGui.QMainWindow.__init__(self, parent) self.setupUi(self) self.btnStart.clicked.connect(self.validate_builder) self.isOfficialVersion = False", "doTdk, doTdkDeviceDriver, # doTdkSetup, doTdkDeviceDriverSetup, versionNumber, # doTdk_Debug, doTdkDeviceDriver_Debug self.main_workflow() else: print(\"Operation has", "else: print(\"Operation has been stopped by the operator!\") except MyException as e: QMessageBox.about(self,", "def checkDirectoriesExistence(self, FTCMfolderToBeChecked, TDKfolderToBeChecked): try: if not os.path.exists(FTCMfolderToBeChecked): print('Directory %s does not exist.", "OVERLORD SETUP: NO\\n\\n\" if self.doTdkDeviceDriverSetup: operationsLog += \"Building TDK DEVICE DRIVER SETUP: YES\\n\\n\"", "# Load the UI class MyException(Exception): pass class MyWindowClass(QtGui.QMainWindow, form_class): def __init__(self, parent=None):", "self.launch_tdk_design() self.launch_vb6() self.launch_tdk_full() self.launch_tdkdevicedriver() self.launch_tdk_setup() self.launch_tdkdevicedriver_setup() QMessageBox.about(self, \"List of Operations\", \"Operations have been", "- Exception\", str(e)) return False def launch_vb6(self): self.mainBuilder.buildVb6() pass def launch_tdk_design(self): self.mainBuilder.buildTdkDesign() pass", "if self.doBuildTdkDesign: operationsLog += \"Building TDK in DESIGN: YES\\n\\n\" else: operationsLog += \"Building", "def launch_vb6(self): self.mainBuilder.buildVb6() pass def launch_tdk_design(self): self.mainBuilder.buildTdkDesign() pass def launch_tdk_full(self): self.mainBuilder.buildTdkFinal() pass def", "self.doBuildTdk = False if self.chkBuildTdkDeviceDriver.isChecked(): self.doBuildTdkDeviceDriver = True else: self.doBuildTdkDeviceDriver = False if", "TDKin DESIGN: NO\\n\\n\" if 
self.doBuildOverlord: operationsLog += \"Building OVERLORD: YES\\n\" else: operationsLog +=", "CONFIGURATORE OVERLORD: NO\\n\\n\" if self.doBuildTdk: operationsLog += \"Building TDK: YES\\n\" else: operationsLog +=", "YES\\n\\n\" else: operationsLog += \"Building OVERLORD SETUP: NO\\n\\n\" if self.doTdkDeviceDriverSetup: operationsLog += \"Building", "MyException as e: QMessageBox.about(self, \"List of Operations - Exception\", str(e)) return False def", "= False if self.chkBuildTdk.isChecked(): self.doBuildTdk = True else: self.doBuildTdk = False if self.chkBuildTdkDeviceDriver.isChecked():", "Operations\", operationsLog, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) if reply == QtGui.QMessageBox.Yes: return True else: return False", "the operator!\") except MyException as e: QMessageBox.about(self, \"List of Operations - Exception\", str(e))", "(TDKfolderToBeChecked)) return True except MyException as e: QMessageBox. about(self, \"CheckDirectoryExistence - Exception\", str(e))", "QMessageBox from main import * from builder_configuration import BuilderConfiguration form_class = uic.loadUiType(\"main-interface.ui\")[0] #", "self.doTdkSetup = True else: self.doTdkSetup = False if self.chkDoTdkDeviceDriverSetup.isChecked(): self.doTdkDeviceDriverSetup = True else:", "a NON_OFFICIAL VERSION of OVERLORD, version number is ['+ self.versionNumber +']!\\n\\n' if self.doBuildTdkDesign:", "return False def launch_vb6(self): self.mainBuilder.buildVb6() pass def launch_tdk_design(self): self.mainBuilder.buildTdkDesign() pass def launch_tdk_full(self): self.mainBuilder.buildTdkFinal()", "DESIGN: YES\\n\\n\" else: operationsLog += \"Building TDKin DESIGN: NO\\n\\n\" if self.doBuildOverlord: operationsLog +=", "YES\\n\" else: operationsLog += \"Building TDK: NO\\n\" if self.doBuildTdk_Debug: operationsLog += \"Building TDK", "= False self.doBuildTdkDeviceDriver = False self.doTdkSetup = False self.doTdkDeviceDriverSetup = False self.doBuildTdk_Debug =", "False self.doTdkSetup = 
False self.doTdkDeviceDriverSetup = False self.doBuildTdk_Debug = False self.doBuildTdkDeviceDriver_Debug = False", "number not specified!') if self.isOfficialVersion: FTCMfolderToBeChecked = self.configurator.OfficialExePath + \"\\\\\" + self.edtVersion.text() TDKfolderToBeChecked", "= MainBuilder( self.isOfficialVersion, self.doBuildTdkDesign, self.doBuildOverlord, self.doBuildConfiguratore, self.doBuildTdk, self.doBuildTdkDeviceDriver, self.doTdkSetup, self.doTdkDeviceDriverSetup, self.versionNumber, self.doBuildTdk_Debug, self.doBuildTdkDeviceDriver_Debug)", "NO\\n\\n\" if self.doBuildOverlord: operationsLog += \"Building OVERLORD: YES\\n\" else: operationsLog += \"Building OVERLORD:", "= False self.doBuildTdk_Debug = False self.doBuildTdkDeviceDriver_Debug = False self.configurator = BuilderConfiguration() def getOfficialVersionType(self):", "of OVERLORD, version number is ['+ self.versionNumber +']\\n\\n' else: operationsLog += 'Building a", "else: operationsLog += \"Building TDKin DESIGN: NO\\n\\n\" if self.doBuildOverlord: operationsLog += \"Building OVERLORD:", "# self, doOfficial, doOverlord, # doConfiguratore, doTdk, doTdkDeviceDriver, # doTdkSetup, doTdkDeviceDriverSetup, versionNumber, #", "Operations - Exception\", str(e)) return False def launch_vb6(self): self.mainBuilder.buildVb6() pass def launch_tdk_design(self): self.mainBuilder.buildTdkDesign()", "pass def launch_tdk_design(self): self.mainBuilder.buildTdkDesign() pass def launch_tdk_full(self): self.mainBuilder.buildTdkFinal() pass def launch_tdkdevicedriver(self): self.mainBuilder.buildTdkDeviceDriver() pass", "QtGui, QtCore, uic from PyQt4.QtGui import QMessageBox from main import * from builder_configuration", "+= \"Building OVERLORD SETUP: YES\\n\\n\" else: operationsLog += \"Building OVERLORD SETUP: NO\\n\\n\" if", "= uic.loadUiType(\"main-interface.ui\")[0] # Load the UI class MyException(Exception): pass class MyWindowClass(QtGui.QMainWindow, form_class): def", "not 
self.edtVersion.text(): raise MyException('Version number not specified!') if self.isOfficialVersion: FTCMfolderToBeChecked = self.configurator.OfficialExePath +", "form_class = uic.loadUiType(\"main-interface.ui\")[0] # Load the UI class MyException(Exception): pass class MyWindowClass(QtGui.QMainWindow, form_class):", "= True else: self.doBuildConfiguratore = False if self.chkBuildTdk.isChecked(): self.doBuildTdk = True else: self.doBuildTdk", "TDKfolderToBeChecked = self.configurator.NonOfficialTdkPath + \"\\\\\" + self.edtVersion.text() if not self.checkDirectoriesExistence(FTCMfolderToBeChecked, TDKfolderToBeChecked): raise MyException('Error", "+ self.edtVersion.text() if not self.checkDirectoriesExistence(FTCMfolderToBeChecked, TDKfolderToBeChecked): raise MyException('Error while creating folders for FTCM", "True else: self.doTdkSetup = False if self.chkDoTdkDeviceDriverSetup.isChecked(): self.doTdkDeviceDriverSetup = True else: self.doTdkDeviceDriverSetup =", "= self.configurator.NonOfficialExePath + \"\\\\\" + self.edtVersion.text() TDKfolderToBeChecked = self.configurator.NonOfficialTdkPath + \"\\\\\" + self.edtVersion.text()", "+ \"\\\\\" + self.edtVersion.text() if not self.checkDirectoriesExistence(FTCMfolderToBeChecked, TDKfolderToBeChecked): raise MyException('Error while creating folders", "= False if self.chkDoBuildTdk_Debug.isChecked(): self.doBuildTdk_Debug = True else: self.doBuildTdk_Debug = False if self.chkDoBuildTdkDeviceDriver_Debug.isChecked():", "not self.checkDirectoriesExistence(FTCMfolderToBeChecked, TDKfolderToBeChecked): raise MyException('Error while creating folders for FTCM and TDK versions!')", "False def launch_vb6(self): self.mainBuilder.buildVb6() pass def launch_tdk_design(self): self.mainBuilder.buildTdkDesign() pass def launch_tdk_full(self): self.mainBuilder.buildTdkFinal() pass", "+ \"\\\\\" + self.edtVersion.text() TDKfolderToBeChecked = self.configurator.NonOfficialTdkPath + \"\\\\\" + self.edtVersion.text() if not", 
"False self.doTdkDeviceDriverSetup = False self.doBuildTdk_Debug = False self.doBuildTdkDeviceDriver_Debug = False self.configurator = BuilderConfiguration()", "def getOfficialVersionType(self): if self.chkOfficialVersion.isChecked(): self.isOfficialVersion = True else: self.isOfficialVersion = False def getOperationsList(self):", "else: operationsLog += \"Building TDK: NO\\n\" if self.doBuildTdk_Debug: operationsLog += \"Building TDK in", "True else: self.doBuildTdk_Debug = False if self.chkDoBuildTdkDeviceDriver_Debug.isChecked(): self.doBuildTdkDeviceDriver_Debug = True else: self.doBuildTdkDeviceDriver_Debug =", "self.versionNumber +']!\\n\\n' if self.doBuildTdkDesign: operationsLog += \"Building TDK in DESIGN: YES\\n\\n\" else: operationsLog", "YES\\n\" else: operationsLog += \"Building TDK DEVICE DRIVER: NO\\n\" if self.doBuildTdkDeviceDriver_Debug: operationsLog +=", "'Building a NON_OFFICIAL VERSION of OVERLORD, version number is ['+ self.versionNumber +']!\\n\\n' if", "self.edtVersion.text() if not self.checkDirectoriesExistence(FTCMfolderToBeChecked, TDKfolderToBeChecked): raise MyException('Error while creating folders for FTCM and", "self.doTdkSetup: operationsLog += \"Building OVERLORD SETUP: YES\\n\\n\" else: operationsLog += \"Building OVERLORD SETUP:", "is ['+ self.versionNumber +']\\n\\n' else: operationsLog += 'Building a NON_OFFICIAL VERSION of OVERLORD,", "False if self.chkDoBuildTdk_Debug.isChecked(): self.doBuildTdk_Debug = True else: self.doBuildTdk_Debug = False if self.chkDoBuildTdkDeviceDriver_Debug.isChecked(): self.doBuildTdkDeviceDriver_Debug", "+= 'Building an OFFICIAL VERSION of OVERLORD, version number is ['+ self.versionNumber +']\\n\\n'", "False if self.chkBuildTdkDeviceDriver.isChecked(): self.doBuildTdkDeviceDriver = True else: self.doBuildTdkDeviceDriver = False if self.chkDoTdkSetup.isChecked(): self.doTdkSetup", "VERSION of OVERLORD, version number is ['+ self.versionNumber +']\\n\\n' else: operationsLog += 'Building", "= 
False self.doBuildConfiguratore = False self.doBuildTdk = False self.doBuildTdkDeviceDriver = False self.doTdkSetup =", "MyException(Exception): pass class MyWindowClass(QtGui.QMainWindow, form_class): def __init__(self, parent=None): QtGui.QMainWindow.__init__(self, parent) self.setupUi(self) self.btnStart.clicked.connect(self.validate_builder) self.isOfficialVersion", "self.isOfficialVersion = False def getOperationsList(self): if self.chkBuildTdkDesign.isChecked(): self.doBuildTdkDesign = True else: self.doBuildTdkDesign =", "operationsLog += 'Building an OFFICIAL VERSION of OVERLORD, version number is ['+ self.versionNumber", "OVERLORD SETUP: YES\\n\\n\" else: operationsLog += \"Building OVERLORD SETUP: NO\\n\\n\" if self.doTdkDeviceDriverSetup: operationsLog", "False def checkDirectoriesExistence(self, FTCMfolderToBeChecked, TDKfolderToBeChecked): try: if not os.path.exists(FTCMfolderToBeChecked): print('Directory %s does not", "operationsLog += \"Building TDK: NO\\n\" if self.doBuildTdk_Debug: operationsLog += \"Building TDK in DEBUG:", "else: self.doTdkSetup = False if self.chkDoTdkDeviceDriverSetup.isChecked(): self.doTdkDeviceDriverSetup = True else: self.doTdkDeviceDriverSetup = False", "else: self.doBuildConfiguratore = False if self.chkBuildTdk.isChecked(): self.doBuildTdk = True else: self.doBuildTdk = False", "except MyException as e: QMessageBox.about(self, \"List of Operations - Exception\", str(e)) return False", "OVERLORD: NO\\n\" if self.doBuildConfiguratore: operationsLog += \"Building CONFIGURATORE OVERLORD: YES\\n\\n\" else: operationsLog +=", "and TDK versions!') self.versionNumber = self.edtVersion.text() operationsLog = \"\" self.getOfficialVersionType() self.getOperationsList() if self.isOfficialVersion:", "else: operationsLog += \"Building CONFIGURATORE OVERLORD: NO\\n\\n\" if self.doBuildTdk: operationsLog += \"Building TDK:", "self.doTdkDeviceDriverSetup: operationsLog += \"Building TDK DEVICE DRIVER SETUP: YES\\n\\n\" else: 
operationsLog += \"Building", "self.chkBuildTdkDeviceDriver.isChecked(): self.doBuildTdkDeviceDriver = True else: self.doBuildTdkDeviceDriver = False if self.chkDoTdkSetup.isChecked(): self.doTdkSetup = True", "doConfiguratore, doTdk, doTdkDeviceDriver, # doTdkSetup, doTdkDeviceDriverSetup, versionNumber, # doTdk_Debug, doTdkDeviceDriver_Debug self.main_workflow() else: print(\"Operation", "YES\\n\\n\" else: operationsLog += \"Building TDK DEVICE DRIVER in DEBUG: NO\\n\\n\" if self.doTdkSetup:", "self.launch_vb6() self.launch_tdk_full() self.launch_tdkdevicedriver() self.launch_tdk_setup() self.launch_tdkdevicedriver_setup() QMessageBox.about(self, \"List of Operations\", \"Operations have been finished!\")", "raise MyException('Error while creating folders for FTCM and TDK versions!') self.versionNumber = self.edtVersion.text()", "self.doBuildTdk_Debug, self.doBuildTdkDeviceDriver_Debug) # self, doOfficial, doOverlord, # doConfiguratore, doTdk, doTdkDeviceDriver, # doTdkSetup, doTdkDeviceDriverSetup,", "if self.chkDoTdkDeviceDriverSetup.isChecked(): self.doTdkDeviceDriverSetup = True else: self.doTdkDeviceDriverSetup = False if self.chkDoBuildTdk_Debug.isChecked(): self.doBuildTdk_Debug =", "\"Would you like to continue using these parameters?\" print(operationsLog) #QMessageBox.about(self, \"List of Operations\",", "self.doBuildOverlord = False self.doBuildConfiguratore = False self.doBuildTdk = False self.doBuildTdkDeviceDriver = False self.doTdkSetup", "of Operations\", operationsLog, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) if reply == QtGui.QMessageBox.Yes: return True else: return", "False if self.chkDoTdkSetup.isChecked(): self.doTdkSetup = True else: self.doTdkSetup = False if self.chkDoTdkDeviceDriverSetup.isChecked(): self.doTdkDeviceDriverSetup", "DRIVER SETUP: YES\\n\\n\" else: operationsLog += \"Building TDK DEVICE DRIVER SETUP: NO\\n\\n\" operationsLog", "not specified!') if self.isOfficialVersion: FTCMfolderToBeChecked = 
self.configurator.OfficialExePath + \"\\\\\" + self.edtVersion.text() TDKfolderToBeChecked =", "MyWindowClass(QtGui.QMainWindow, form_class): def __init__(self, parent=None): QtGui.QMainWindow.__init__(self, parent) self.setupUi(self) self.btnStart.clicked.connect(self.validate_builder) self.isOfficialVersion = False self.versionNumber", "self.chkBuildConfiguratore.isChecked(): if not self.doBuildTdkDesign: self.chkBuildTdkDesign.toggle() self.doBuildTdkDesign = True self.doBuildConfiguratore = True else: self.doBuildConfiguratore", "def launch_tdk_setup(self): self.mainBuilder.buildTdkSetup() pass def launch_tdkdevicedriver_setup(self): pass def main_workflow(self): self.launch_tdk_design() self.launch_vb6() self.launch_tdk_full() self.launch_tdkdevicedriver()", "\"Building CONFIGURATORE OVERLORD: YES\\n\\n\" else: operationsLog += \"Building CONFIGURATORE OVERLORD: NO\\n\\n\" if self.doBuildTdk:", "#QMessageBox.about(self, \"List of Operations\", operationsLog) reply = QMessageBox.question(self, \"List of Operations\", operationsLog, QtGui.QMessageBox.Yes,", "(FTCMfolderToBeChecked)) if not os.path.exists(TDKfolderToBeChecked): print('Directory %s does not exist. 
It will be created.'%", "getOfficialVersionType(self): if self.chkOfficialVersion.isChecked(): self.isOfficialVersion = True else: self.isOfficialVersion = False def getOperationsList(self): if", "= False self.doTdkDeviceDriverSetup = False self.doBuildTdk_Debug = False self.doBuildTdkDeviceDriver_Debug = False self.configurator =", "self.doBuildTdkDeviceDriver = False if self.chkDoTdkSetup.isChecked(): self.doTdkSetup = True else: self.doTdkSetup = False if", "from builder_configuration import BuilderConfiguration form_class = uic.loadUiType(\"main-interface.ui\")[0] # Load the UI class MyException(Exception):", "self.mainBuilder.buildTdkDeviceDriver() pass def launch_tdk_setup(self): self.mainBuilder.buildTdkSetup() pass def launch_tdkdevicedriver_setup(self): pass def main_workflow(self): self.launch_tdk_design() self.launch_vb6()", "+= \"Building CONFIGURATORE OVERLORD: YES\\n\\n\" else: operationsLog += \"Building CONFIGURATORE OVERLORD: NO\\n\\n\" if", "VERSION of OVERLORD, version number is ['+ self.versionNumber +']!\\n\\n' if self.doBuildTdkDesign: operationsLog +=", "= False if self.chkBuildConfiguratore.isChecked(): if not self.doBuildTdkDesign: self.chkBuildTdkDesign.toggle() self.doBuildTdkDesign = True self.doBuildConfiguratore =", "True else: self.doBuildOverlord = False if self.chkBuildConfiguratore.isChecked(): if not self.doBuildTdkDesign: self.chkBuildTdkDesign.toggle() self.doBuildTdkDesign =", "self.configurator.NonOfficialTdkPath + \"\\\\\" + self.edtVersion.text() if not self.checkDirectoriesExistence(FTCMfolderToBeChecked, TDKfolderToBeChecked): raise MyException('Error while creating", "False if self.chkDoTdkDeviceDriverSetup.isChecked(): self.doTdkDeviceDriverSetup = True else: self.doTdkDeviceDriverSetup = False if self.chkDoBuildTdk_Debug.isChecked(): self.doBuildTdk_Debug", "self.doBuildTdk_Debug = True else: self.doBuildTdk_Debug = False if self.chkDoBuildTdkDeviceDriver_Debug.isChecked(): self.doBuildTdkDeviceDriver_Debug = True 
else:", "self.doBuildOverlord, self.doBuildConfiguratore, self.doBuildTdk, self.doBuildTdkDeviceDriver, self.doTdkSetup, self.doTdkDeviceDriverSetup, self.versionNumber, self.doBuildTdk_Debug, self.doBuildTdkDeviceDriver_Debug) # self, doOfficial, doOverlord,", "self.edtVersion.text() TDKfolderToBeChecked = self.configurator.OfficialTdkPath + \"\\\\\" + self.edtVersion.text() else: FTCMfolderToBeChecked = self.configurator.NonOfficialExePath +", "e: QMessageBox.about(self, \"List of Operations - Exception\", str(e)) return False def launch_vb6(self): self.mainBuilder.buildVb6()", "\"\\\\\" + self.edtVersion.text() TDKfolderToBeChecked = self.configurator.NonOfficialTdkPath + \"\\\\\" + self.edtVersion.text() if not self.checkDirectoriesExistence(FTCMfolderToBeChecked,", "folders for FTCM and TDK versions!') self.versionNumber = self.edtVersion.text() operationsLog = \"\" self.getOfficialVersionType()", "\"\\\\\" + self.edtVersion.text() else: FTCMfolderToBeChecked = self.configurator.NonOfficialExePath + \"\\\\\" + self.edtVersion.text() TDKfolderToBeChecked =", "Exception\", str(e)) return False def resumeOperations(self): if not self.edtVersion.text(): raise MyException('Version number not", "uic from PyQt4.QtGui import QMessageBox from main import * from builder_configuration import BuilderConfiguration", "\"Building TDK in DEBUG: NO\\n\\n\" if self.doBuildTdkDeviceDriver: operationsLog += \"Building TDK DEVICE DRIVER:", "self.getOfficialVersionType() self.getOperationsList() if self.isOfficialVersion: operationsLog += 'Building an OFFICIAL VERSION of OVERLORD, version", "+= \"Building TDK: YES\\n\" else: operationsLog += \"Building TDK: NO\\n\" if self.doBuildTdk_Debug: operationsLog", "doOfficial, doOverlord, # doConfiguratore, doTdk, doTdkDeviceDriver, # doTdkSetup, doTdkDeviceDriverSetup, versionNumber, # doTdk_Debug, doTdkDeviceDriver_Debug", "self.chkBuildTdkDesign.toggle() self.doBuildTdkDesign = True self.doBuildOverlord = True else: self.doBuildOverlord 
= False if self.chkBuildConfiguratore.isChecked():", "if not os.path.exists(FTCMfolderToBeChecked): print('Directory %s does not exist. It will be created.'% (FTCMfolderToBeChecked))", "\"Building CONFIGURATORE OVERLORD: NO\\n\\n\" if self.doBuildTdk: operationsLog += \"Building TDK: YES\\n\" else: operationsLog", "if self.chkBuildTdk.isChecked(): self.doBuildTdk = True else: self.doBuildTdk = False if self.chkBuildTdkDeviceDriver.isChecked(): self.doBuildTdkDeviceDriver =", "self.chkDoTdkDeviceDriverSetup.isChecked(): self.doTdkDeviceDriverSetup = True else: self.doTdkDeviceDriverSetup = False if self.chkDoBuildTdk_Debug.isChecked(): self.doBuildTdk_Debug = True", "TDKfolderToBeChecked): raise MyException('Error while creating folders for FTCM and TDK versions!') self.versionNumber =", "self.chkDoBuildTdkDeviceDriver_Debug.isChecked(): self.doBuildTdkDeviceDriver_Debug = True else: self.doBuildTdkDeviceDriver_Debug = False def checkDirectoriesExistence(self, FTCMfolderToBeChecked, TDKfolderToBeChecked): try:", "\"Operations have been finished!\") pass app = QtGui.QApplication(sys.argv) myWindow = MyWindowClass(None) myWindow.show() app.exec_()", "self.doBuildTdk_Debug = False self.doBuildTdkDeviceDriver_Debug = False self.configurator = BuilderConfiguration() def getOfficialVersionType(self): if self.chkOfficialVersion.isChecked():", "import sys, os from PyQt4 import QtGui, QtCore, uic from PyQt4.QtGui import QMessageBox", "DEVICE DRIVER SETUP: YES\\n\\n\" else: operationsLog += \"Building TDK DEVICE DRIVER SETUP: NO\\n\\n\"", "= None self.doBuildTdkDesign = False self.doBuildOverlord = False self.doBuildConfiguratore = False self.doBuildTdk =", "self.chkDoTdkSetup.isChecked(): self.doTdkSetup = True else: self.doTdkSetup = False if self.chkDoTdkDeviceDriverSetup.isChecked(): self.doTdkDeviceDriverSetup = True", "self.doBuildTdkDesign = True self.doBuildOverlord = True else: self.doBuildOverlord = False if self.chkBuildConfiguratore.isChecked(): if", "of 
Operations\", \"Operations have been finished!\") pass app = QtGui.QApplication(sys.argv) myWindow = MyWindowClass(None)", "if self.isOfficialVersion: FTCMfolderToBeChecked = self.configurator.OfficialExePath + \"\\\\\" + self.edtVersion.text() TDKfolderToBeChecked = self.configurator.OfficialTdkPath +", "False if self.chkDoBuildTdkDeviceDriver_Debug.isChecked(): self.doBuildTdkDeviceDriver_Debug = True else: self.doBuildTdkDeviceDriver_Debug = False def checkDirectoriesExistence(self, FTCMfolderToBeChecked,", "self.isOfficialVersion, self.doBuildTdkDesign, self.doBuildOverlord, self.doBuildConfiguratore, self.doBuildTdk, self.doBuildTdkDeviceDriver, self.doTdkSetup, self.doTdkDeviceDriverSetup, self.versionNumber, self.doBuildTdk_Debug, self.doBuildTdkDeviceDriver_Debug) # self,", "print('Directory %s does not exist. It will be created.'% (TDKfolderToBeChecked)) os.makedirs(TDKfolderToBeChecked) print('Directory %s", "OVERLORD: NO\\n\\n\" if self.doBuildTdk: operationsLog += \"Building TDK: YES\\n\" else: operationsLog += \"Building", "reply = self.resumeOperations() if reply: print(\"Operations will be executed\") self.mainBuilder = MainBuilder( self.isOfficialVersion,", "self.doBuildTdk_Debug: operationsLog += \"Building TDK in DEBUG: YES\\n\\n\" else: operationsLog += \"Building TDK", "= False self.doBuildOverlord = False self.doBuildConfiguratore = False self.doBuildTdk = False self.doBuildTdkDeviceDriver =", "+= \"Building TDK in DESIGN: YES\\n\\n\" else: operationsLog += \"Building TDKin DESIGN: NO\\n\\n\"", "CONFIGURATORE OVERLORD: YES\\n\\n\" else: operationsLog += \"Building CONFIGURATORE OVERLORD: NO\\n\\n\" if self.doBuildTdk: operationsLog", "%s has been created.'% (FTCMfolderToBeChecked)) if not os.path.exists(TDKfolderToBeChecked): print('Directory %s does not exist.", "self.edtVersion.text() operationsLog = \"\" self.getOfficialVersionType() self.getOperationsList() if self.isOfficialVersion: operationsLog += 'Building an OFFICIAL", "continue using 
these parameters?\" print(operationsLog) #QMessageBox.about(self, \"List of Operations\", operationsLog) reply = QMessageBox.question(self,", "True else: self.doTdkDeviceDriverSetup = False if self.chkDoBuildTdk_Debug.isChecked(): self.doBuildTdk_Debug = True else: self.doBuildTdk_Debug =", "doOverlord, # doConfiguratore, doTdk, doTdkDeviceDriver, # doTdkSetup, doTdkDeviceDriverSetup, versionNumber, # doTdk_Debug, doTdkDeviceDriver_Debug self.main_workflow()", "+= \"Building TDK in DEBUG: NO\\n\\n\" if self.doBuildTdkDeviceDriver: operationsLog += \"Building TDK DEVICE", "BuilderConfiguration() def getOfficialVersionType(self): if self.chkOfficialVersion.isChecked(): self.isOfficialVersion = True else: self.isOfficialVersion = False def", "def getOperationsList(self): if self.chkBuildTdkDesign.isChecked(): self.doBuildTdkDesign = True else: self.doBuildTdkDesign = False if self.chkBuildOverlord.isChecked():", "of Operations\", operationsLog) reply = QMessageBox.question(self, \"List of Operations\", operationsLog, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) if", "if self.chkBuildTdkDeviceDriver.isChecked(): self.doBuildTdkDeviceDriver = True else: self.doBuildTdkDeviceDriver = False if self.chkDoTdkSetup.isChecked(): self.doTdkSetup =", "self.mainBuilder.buildTdkFinal() pass def launch_tdkdevicedriver(self): self.mainBuilder.buildTdkDeviceDriver() pass def launch_tdk_setup(self): self.mainBuilder.buildTdkSetup() pass def launch_tdkdevicedriver_setup(self): pass", "not os.path.exists(FTCMfolderToBeChecked): print('Directory %s does not exist. 
It will be created.'% (FTCMfolderToBeChecked)) os.makedirs(FTCMfolderToBeChecked)", "NO\\n\\n\" if self.doTdkDeviceDriverSetup: operationsLog += \"Building TDK DEVICE DRIVER SETUP: YES\\n\\n\" else: operationsLog", "self.doTdkSetup, self.doTdkDeviceDriverSetup, self.versionNumber, self.doBuildTdk_Debug, self.doBuildTdkDeviceDriver_Debug) # self, doOfficial, doOverlord, # doConfiguratore, doTdk, doTdkDeviceDriver,", "executed\") self.mainBuilder = MainBuilder( self.isOfficialVersion, self.doBuildTdkDesign, self.doBuildOverlord, self.doBuildConfiguratore, self.doBuildTdk, self.doBuildTdkDeviceDriver, self.doTdkSetup, self.doTdkDeviceDriverSetup, self.versionNumber,", "NO\\n\\n\" operationsLog += \"Would you like to continue using these parameters?\" print(operationsLog) #QMessageBox.about(self,", "def resumeOperations(self): if not self.edtVersion.text(): raise MyException('Version number not specified!') if self.isOfficialVersion: FTCMfolderToBeChecked", "+ \"\\\\\" + self.edtVersion.text() else: FTCMfolderToBeChecked = self.configurator.NonOfficialExePath + \"\\\\\" + self.edtVersion.text() TDKfolderToBeChecked", "else: self.doBuildOverlord = False if self.chkBuildConfiguratore.isChecked(): if not self.doBuildTdkDesign: self.chkBuildTdkDesign.toggle() self.doBuildTdkDesign = True", "self.chkBuildTdk.isChecked(): self.doBuildTdk = True else: self.doBuildTdk = False if self.chkBuildTdkDeviceDriver.isChecked(): self.doBuildTdkDeviceDriver = True", "number is ['+ self.versionNumber +']!\\n\\n' if self.doBuildTdkDesign: operationsLog += \"Building TDK in DESIGN:", "\"Building TDK in DESIGN: YES\\n\\n\" else: operationsLog += \"Building TDKin DESIGN: NO\\n\\n\" if", "self.getOperationsList() if self.isOfficialVersion: operationsLog += 'Building an OFFICIAL VERSION of OVERLORD, version number", "created.'% (TDKfolderToBeChecked)) return True except MyException as e: QMessageBox. 
about(self, \"CheckDirectoryExistence - Exception\",", "operationsLog += \"Building TDK in DEBUG: NO\\n\\n\" if self.doBuildTdkDeviceDriver: operationsLog += \"Building TDK", "= False if self.chkDoBuildTdkDeviceDriver_Debug.isChecked(): self.doBuildTdkDeviceDriver_Debug = True else: self.doBuildTdkDeviceDriver_Debug = False def checkDirectoriesExistence(self,", "return False def resumeOperations(self): if not self.edtVersion.text(): raise MyException('Version number not specified!') if", "= self.configurator.OfficialTdkPath + \"\\\\\" + self.edtVersion.text() else: FTCMfolderToBeChecked = self.configurator.NonOfficialExePath + \"\\\\\" +", "self.versionNumber +']\\n\\n' else: operationsLog += 'Building a NON_OFFICIAL VERSION of OVERLORD, version number", "self.setupUi(self) self.btnStart.clicked.connect(self.validate_builder) self.isOfficialVersion = False self.versionNumber = None self.doBuildTdkDesign = False self.doBuildOverlord =", "pass def launch_tdkdevicedriver(self): self.mainBuilder.buildTdkDeviceDriver() pass def launch_tdk_setup(self): self.mainBuilder.buildTdkSetup() pass def launch_tdkdevicedriver_setup(self): pass def", "else: FTCMfolderToBeChecked = self.configurator.NonOfficialExePath + \"\\\\\" + self.edtVersion.text() TDKfolderToBeChecked = self.configurator.NonOfficialTdkPath + \"\\\\\"", "self.doBuildTdkDeviceDriver: operationsLog += \"Building TDK DEVICE DRIVER: YES\\n\" else: operationsLog += \"Building TDK", "self.doBuildTdkDesign = True self.doBuildConfiguratore = True else: self.doBuildConfiguratore = False if self.chkBuildTdk.isChecked(): self.doBuildTdk", "QMessageBox.question(self, \"List of Operations\", operationsLog, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) if reply == QtGui.QMessageBox.Yes: return True", "parameters?\" print(operationsLog) #QMessageBox.about(self, \"List of Operations\", operationsLog) reply = QMessageBox.question(self, \"List of Operations\",", "== QtGui.QMessageBox.Yes: return True else: return False def 
validate_builder(self): try: reply = self.resumeOperations()", "operator!\") except MyException as e: QMessageBox.about(self, \"List of Operations - Exception\", str(e)) return", "os.makedirs(TDKfolderToBeChecked) print('Directory %s has been created.'% (TDKfolderToBeChecked)) return True except MyException as e:", "= False if self.chkBuildOverlord.isChecked(): if not self.doBuildTdkDesign: self.chkBuildTdkDesign.toggle() self.doBuildTdkDesign = True self.doBuildOverlord =", "import * from builder_configuration import BuilderConfiguration form_class = uic.loadUiType(\"main-interface.ui\")[0] # Load the UI", "os.path.exists(TDKfolderToBeChecked): print('Directory %s does not exist. It will be created.'% (TDKfolderToBeChecked)) os.makedirs(TDKfolderToBeChecked) print('Directory", "has been created.'% (FTCMfolderToBeChecked)) if not os.path.exists(TDKfolderToBeChecked): print('Directory %s does not exist. It", "TDK DEVICE DRIVER: NO\\n\" if self.doBuildTdkDeviceDriver_Debug: operationsLog += \"Building TDK DEVICE DRIVER in", "by the operator!\") except MyException as e: QMessageBox.about(self, \"List of Operations - Exception\",", "It will be created.'% (FTCMfolderToBeChecked)) os.makedirs(FTCMfolderToBeChecked) print('Directory %s has been created.'% (FTCMfolderToBeChecked)) if", "operationsLog = \"\" self.getOfficialVersionType() self.getOperationsList() if self.isOfficialVersion: operationsLog += 'Building an OFFICIAL VERSION", "created.'% (TDKfolderToBeChecked)) os.makedirs(TDKfolderToBeChecked) print('Directory %s has been created.'% (TDKfolderToBeChecked)) return True except MyException", "not exist. 
It will be created.'% (FTCMfolderToBeChecked)) os.makedirs(FTCMfolderToBeChecked) print('Directory %s has been created.'%", "operationsLog += \"Building TDK DEVICE DRIVER in DEBUG: YES\\n\\n\" else: operationsLog += \"Building", "import QMessageBox from main import * from builder_configuration import BuilderConfiguration form_class = uic.loadUiType(\"main-interface.ui\")[0]", "DEVICE DRIVER in DEBUG: NO\\n\\n\" if self.doTdkSetup: operationsLog += \"Building OVERLORD SETUP: YES\\n\\n\"", "doTdkSetup, doTdkDeviceDriverSetup, versionNumber, # doTdk_Debug, doTdkDeviceDriver_Debug self.main_workflow() else: print(\"Operation has been stopped by", "self.doBuildTdk = False self.doBuildTdkDeviceDriver = False self.doTdkSetup = False self.doTdkDeviceDriverSetup = False self.doBuildTdk_Debug", "if self.chkDoBuildTdkDeviceDriver_Debug.isChecked(): self.doBuildTdkDeviceDriver_Debug = True else: self.doBuildTdkDeviceDriver_Debug = False def checkDirectoriesExistence(self, FTCMfolderToBeChecked, TDKfolderToBeChecked):", "= False self.doBuildTdkDeviceDriver_Debug = False self.configurator = BuilderConfiguration() def getOfficialVersionType(self): if self.chkOfficialVersion.isChecked(): self.isOfficialVersion", "Operations\", \"Operations have been finished!\") pass app = QtGui.QApplication(sys.argv) myWindow = MyWindowClass(None) myWindow.show()", "# doTdk_Debug, doTdkDeviceDriver_Debug self.main_workflow() else: print(\"Operation has been stopped by the operator!\") except", "else: operationsLog += \"Building TDK in DEBUG: NO\\n\\n\" if self.doBuildTdkDeviceDriver: operationsLog += \"Building", "raise MyException('Version number not specified!') if self.isOfficialVersion: FTCMfolderToBeChecked = self.configurator.OfficialExePath + \"\\\\\" +", "def __init__(self, parent=None): QtGui.QMainWindow.__init__(self, parent) self.setupUi(self) self.btnStart.clicked.connect(self.validate_builder) self.isOfficialVersion = False self.versionNumber = None", "self.doBuildConfiguratore = 
True else: self.doBuildConfiguratore = False if self.chkBuildTdk.isChecked(): self.doBuildTdk = True else:", "reply = QMessageBox.question(self, \"List of Operations\", operationsLog, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) if reply == QtGui.QMessageBox.Yes:", "OVERLORD: YES\\n\\n\" else: operationsLog += \"Building CONFIGURATORE OVERLORD: NO\\n\\n\" if self.doBuildTdk: operationsLog +=", "DRIVER: YES\\n\" else: operationsLog += \"Building TDK DEVICE DRIVER: NO\\n\" if self.doBuildTdkDeviceDriver_Debug: operationsLog", "using PyQt import sys, os from PyQt4 import QtGui, QtCore, uic from PyQt4.QtGui", "It will be created.'% (TDKfolderToBeChecked)) os.makedirs(TDKfolderToBeChecked) print('Directory %s has been created.'% (TDKfolderToBeChecked)) return", "\"Building OVERLORD SETUP: NO\\n\\n\" if self.doTdkDeviceDriverSetup: operationsLog += \"Building TDK DEVICE DRIVER SETUP:", "operationsLog += \"Building TDK DEVICE DRIVER in DEBUG: NO\\n\\n\" if self.doTdkSetup: operationsLog +=", "of OVERLORD, version number is ['+ self.versionNumber +']!\\n\\n' if self.doBuildTdkDesign: operationsLog += \"Building", "TDKfolderToBeChecked = self.configurator.OfficialTdkPath + \"\\\\\" + self.edtVersion.text() else: FTCMfolderToBeChecked = self.configurator.NonOfficialExePath + \"\\\\\"", "like to continue using these parameters?\" print(operationsLog) #QMessageBox.about(self, \"List of Operations\", operationsLog) reply", "from PyQt4.QtGui import QMessageBox from main import * from builder_configuration import BuilderConfiguration form_class", "self.doBuildTdk = True else: self.doBuildTdk = False if self.chkBuildTdkDeviceDriver.isChecked(): self.doBuildTdkDeviceDriver = True else:", "print(\"Operations will be executed\") self.mainBuilder = MainBuilder( self.isOfficialVersion, self.doBuildTdkDesign, self.doBuildOverlord, self.doBuildConfiguratore, self.doBuildTdk, self.doBuildTdkDeviceDriver,", "from PyQt4 import QtGui, QtCore, uic from PyQt4.QtGui import QMessageBox from main 
import", "self.chkDoBuildTdk_Debug.isChecked(): self.doBuildTdk_Debug = True else: self.doBuildTdk_Debug = False if self.chkDoBuildTdkDeviceDriver_Debug.isChecked(): self.doBuildTdkDeviceDriver_Debug = True", "Exception\", str(e)) return False def launch_vb6(self): self.mainBuilder.buildVb6() pass def launch_tdk_design(self): self.mainBuilder.buildTdkDesign() pass def", "self.mainBuilder.buildTdkDesign() pass def launch_tdk_full(self): self.mainBuilder.buildTdkFinal() pass def launch_tdkdevicedriver(self): self.mainBuilder.buildTdkDeviceDriver() pass def launch_tdk_setup(self): self.mainBuilder.buildTdkSetup()", "MyException as e: QMessageBox. about(self, \"CheckDirectoryExistence - Exception\", str(e)) return False def resumeOperations(self):", "QtGui.QMessageBox.No) if reply == QtGui.QMessageBox.Yes: return True else: return False def validate_builder(self): try:", "self.doBuildTdkDeviceDriver_Debug = True else: self.doBuildTdkDeviceDriver_Debug = False def checkDirectoriesExistence(self, FTCMfolderToBeChecked, TDKfolderToBeChecked): try: if", "does not exist. 
It will be created.'% (FTCMfolderToBeChecked)) os.makedirs(FTCMfolderToBeChecked) print('Directory %s has been", "if self.chkOfficialVersion.isChecked(): self.isOfficialVersion = True else: self.isOfficialVersion = False def getOperationsList(self): if self.chkBuildTdkDesign.isChecked():", "print(\"Operation has been stopped by the operator!\") except MyException as e: QMessageBox.about(self, \"List", "self.doBuildTdk, self.doBuildTdkDeviceDriver, self.doTdkSetup, self.doTdkDeviceDriverSetup, self.versionNumber, self.doBuildTdk_Debug, self.doBuildTdkDeviceDriver_Debug) # self, doOfficial, doOverlord, # doConfiguratore,", "DEVICE DRIVER SETUP: NO\\n\\n\" operationsLog += \"Would you like to continue using these", "False self.doBuildTdkDeviceDriver_Debug = False self.configurator = BuilderConfiguration() def getOfficialVersionType(self): if self.chkOfficialVersion.isChecked(): self.isOfficialVersion =", "\"Building OVERLORD: NO\\n\" if self.doBuildConfiguratore: operationsLog += \"Building CONFIGURATORE OVERLORD: YES\\n\\n\" else: operationsLog", "parent=None): QtGui.QMainWindow.__init__(self, parent) self.setupUi(self) self.btnStart.clicked.connect(self.validate_builder) self.isOfficialVersion = False self.versionNumber = None self.doBuildTdkDesign =", "self.launch_tdk_setup() self.launch_tdkdevicedriver_setup() QMessageBox.about(self, \"List of Operations\", \"Operations have been finished!\") pass app =", "+= \"Building OVERLORD: YES\\n\" else: operationsLog += \"Building OVERLORD: NO\\n\" if self.doBuildConfiguratore: operationsLog", "True else: self.isOfficialVersion = False def getOperationsList(self): if self.chkBuildTdkDesign.isChecked(): self.doBuildTdkDesign = True else:", "+']!\\n\\n' if self.doBuildTdkDesign: operationsLog += \"Building TDK in DESIGN: YES\\n\\n\" else: operationsLog +=", "e: QMessageBox. 
about(self, \"CheckDirectoryExistence - Exception\", str(e)) return False def resumeOperations(self): if not", "reply == QtGui.QMessageBox.Yes: return True else: return False def validate_builder(self): try: reply =", "= False if self.chkDoTdkDeviceDriverSetup.isChecked(): self.doTdkDeviceDriverSetup = True else: self.doTdkDeviceDriverSetup = False if self.chkDoBuildTdk_Debug.isChecked():", "= self.configurator.NonOfficialTdkPath + \"\\\\\" + self.edtVersion.text() if not self.checkDirectoriesExistence(FTCMfolderToBeChecked, TDKfolderToBeChecked): raise MyException('Error while", "pass class MyWindowClass(QtGui.QMainWindow, form_class): def __init__(self, parent=None): QtGui.QMainWindow.__init__(self, parent) self.setupUi(self) self.btnStart.clicked.connect(self.validate_builder) self.isOfficialVersion =", "False self.versionNumber = None self.doBuildTdkDesign = False self.doBuildOverlord = False self.doBuildConfiguratore = False", "= BuilderConfiguration() def getOfficialVersionType(self): if self.chkOfficialVersion.isChecked(): self.isOfficialVersion = True else: self.isOfficialVersion = False", "self.chkBuildTdkDesign.isChecked(): self.doBuildTdkDesign = True else: self.doBuildTdkDesign = False if self.chkBuildOverlord.isChecked(): if not self.doBuildTdkDesign:", "= True else: self.doBuildTdk = False if self.chkBuildTdkDeviceDriver.isChecked(): self.doBuildTdkDeviceDriver = True else: self.doBuildTdkDeviceDriver", "DEBUG: YES\\n\\n\" else: operationsLog += \"Building TDK DEVICE DRIVER in DEBUG: NO\\n\\n\" if", "operationsLog += 'Building a NON_OFFICIAL VERSION of OVERLORD, version number is ['+ self.versionNumber", "False self.doBuildOverlord = False self.doBuildConfiguratore = False self.doBuildTdk = False self.doBuildTdkDeviceDriver = False", "an OFFICIAL VERSION of OVERLORD, version number is ['+ self.versionNumber +']\\n\\n' else: operationsLog", "self.doBuildTdkDesign = False self.doBuildOverlord = False self.doBuildConfiguratore = False self.doBuildTdk = 
False self.doBuildTdkDeviceDriver", "\"Building TDK DEVICE DRIVER: NO\\n\" if self.doBuildTdkDeviceDriver_Debug: operationsLog += \"Building TDK DEVICE DRIVER", "+= \"Building OVERLORD SETUP: NO\\n\\n\" if self.doTdkDeviceDriverSetup: operationsLog += \"Building TDK DEVICE DRIVER", "UI class MyException(Exception): pass class MyWindowClass(QtGui.QMainWindow, form_class): def __init__(self, parent=None): QtGui.QMainWindow.__init__(self, parent) self.setupUi(self)", "doTdkDeviceDriver, # doTdkSetup, doTdkDeviceDriverSetup, versionNumber, # doTdk_Debug, doTdkDeviceDriver_Debug self.main_workflow() else: print(\"Operation has been", "= False if self.chkDoTdkSetup.isChecked(): self.doTdkSetup = True else: self.doTdkSetup = False if self.chkDoTdkDeviceDriverSetup.isChecked():", "= False self.doTdkSetup = False self.doTdkDeviceDriverSetup = False self.doBuildTdk_Debug = False self.doBuildTdkDeviceDriver_Debug =", "PyQt4 import QtGui, QtCore, uic from PyQt4.QtGui import QMessageBox from main import *", "if self.doTdkSetup: operationsLog += \"Building OVERLORD SETUP: YES\\n\\n\" else: operationsLog += \"Building OVERLORD", "\"Building TDK: YES\\n\" else: operationsLog += \"Building TDK: NO\\n\" if self.doBuildTdk_Debug: operationsLog +=", "(FTCMfolderToBeChecked)) os.makedirs(FTCMfolderToBeChecked) print('Directory %s has been created.'% (FTCMfolderToBeChecked)) if not os.path.exists(TDKfolderToBeChecked): print('Directory %s", "operationsLog) reply = QMessageBox.question(self, \"List of Operations\", operationsLog, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) if reply ==", "# doTdkSetup, doTdkDeviceDriverSetup, versionNumber, # doTdk_Debug, doTdkDeviceDriver_Debug self.main_workflow() else: print(\"Operation has been stopped", "using these parameters?\" print(operationsLog) #QMessageBox.about(self, \"List of Operations\", operationsLog) reply = QMessageBox.question(self, \"List", "* from builder_configuration import BuilderConfiguration form_class = 
uic.loadUiType(\"main-interface.ui\")[0] # Load the UI class", "import BuilderConfiguration form_class = uic.loadUiType(\"main-interface.ui\")[0] # Load the UI class MyException(Exception): pass class", "OVERLORD, version number is ['+ self.versionNumber +']\\n\\n' else: operationsLog += 'Building a NON_OFFICIAL", "pass def launch_tdk_full(self): self.mainBuilder.buildTdkFinal() pass def launch_tdkdevicedriver(self): self.mainBuilder.buildTdkDeviceDriver() pass def launch_tdk_setup(self): self.mainBuilder.buildTdkSetup() pass", "import QtGui, QtCore, uic from PyQt4.QtGui import QMessageBox from main import * from", "False if self.chkBuildOverlord.isChecked(): if not self.doBuildTdkDesign: self.chkBuildTdkDesign.toggle() self.doBuildTdkDesign = True self.doBuildOverlord = True", "else: self.doTdkDeviceDriverSetup = False if self.chkDoBuildTdk_Debug.isChecked(): self.doBuildTdk_Debug = True else: self.doBuildTdk_Debug = False", "QMessageBox.about(self, \"List of Operations - Exception\", str(e)) return False def launch_vb6(self): self.mainBuilder.buildVb6() pass", "# Temperature-conversion program using PyQt import sys, os from PyQt4 import QtGui, QtCore,", "self.doBuildTdk_Debug = False if self.chkDoBuildTdkDeviceDriver_Debug.isChecked(): self.doBuildTdkDeviceDriver_Debug = True else: self.doBuildTdkDeviceDriver_Debug = False def", "%s does not exist. 
It will be created.'% (FTCMfolderToBeChecked)) os.makedirs(FTCMfolderToBeChecked) print('Directory %s has", "else: operationsLog += \"Building OVERLORD: NO\\n\" if self.doBuildConfiguratore: operationsLog += \"Building CONFIGURATORE OVERLORD:", "DRIVER in DEBUG: YES\\n\\n\" else: operationsLog += \"Building TDK DEVICE DRIVER in DEBUG:", "'Building an OFFICIAL VERSION of OVERLORD, version number is ['+ self.versionNumber +']\\n\\n' else:", "be created.'% (FTCMfolderToBeChecked)) os.makedirs(FTCMfolderToBeChecked) print('Directory %s has been created.'% (FTCMfolderToBeChecked)) if not os.path.exists(TDKfolderToBeChecked):", "if not os.path.exists(TDKfolderToBeChecked): print('Directory %s does not exist. It will be created.'% (TDKfolderToBeChecked))", "= True else: self.doTdkDeviceDriverSetup = False if self.chkDoBuildTdk_Debug.isChecked(): self.doBuildTdk_Debug = True else: self.doBuildTdk_Debug", "form_class): def __init__(self, parent=None): QtGui.QMainWindow.__init__(self, parent) self.setupUi(self) self.btnStart.clicked.connect(self.validate_builder) self.isOfficialVersion = False self.versionNumber =", "resumeOperations(self): if not self.edtVersion.text(): raise MyException('Version number not specified!') if self.isOfficialVersion: FTCMfolderToBeChecked =", "= True else: self.doBuildTdkDeviceDriver_Debug = False def checkDirectoriesExistence(self, FTCMfolderToBeChecked, TDKfolderToBeChecked): try: if not", "+= \"Building TDK DEVICE DRIVER: NO\\n\" if self.doBuildTdkDeviceDriver_Debug: operationsLog += \"Building TDK DEVICE", "doTdkDeviceDriverSetup, versionNumber, # doTdk_Debug, doTdkDeviceDriver_Debug self.main_workflow() else: print(\"Operation has been stopped by the", "self.doTdkDeviceDriverSetup = False if self.chkDoBuildTdk_Debug.isChecked(): self.doBuildTdk_Debug = True else: self.doBuildTdk_Debug = False if", "True else: return False def validate_builder(self): try: reply = self.resumeOperations() if reply: print(\"Operations", 
"QtGui.QMainWindow.__init__(self, parent) self.setupUi(self) self.btnStart.clicked.connect(self.validate_builder) self.isOfficialVersion = False self.versionNumber = None self.doBuildTdkDesign = False", "self.doTdkSetup = False if self.chkDoTdkDeviceDriverSetup.isChecked(): self.doTdkDeviceDriverSetup = True else: self.doTdkDeviceDriverSetup = False if", "+= \"Building TDK DEVICE DRIVER SETUP: YES\\n\\n\" else: operationsLog += \"Building TDK DEVICE", "self.versionNumber, self.doBuildTdk_Debug, self.doBuildTdkDeviceDriver_Debug) # self, doOfficial, doOverlord, # doConfiguratore, doTdk, doTdkDeviceDriver, # doTdkSetup,", "TDKfolderToBeChecked): try: if not os.path.exists(FTCMfolderToBeChecked): print('Directory %s does not exist. It will be", "['+ self.versionNumber +']\\n\\n' else: operationsLog += 'Building a NON_OFFICIAL VERSION of OVERLORD, version", "YES\\n\\n\" else: operationsLog += \"Building CONFIGURATORE OVERLORD: NO\\n\\n\" if self.doBuildTdk: operationsLog += \"Building", "TDK DEVICE DRIVER SETUP: YES\\n\\n\" else: operationsLog += \"Building TDK DEVICE DRIVER SETUP:", "YES\\n\" else: operationsLog += \"Building OVERLORD: NO\\n\" if self.doBuildConfiguratore: operationsLog += \"Building CONFIGURATORE", "def validate_builder(self): try: reply = self.resumeOperations() if reply: print(\"Operations will be executed\") self.mainBuilder", "DEBUG: YES\\n\\n\" else: operationsLog += \"Building TDK in DEBUG: NO\\n\\n\" if self.doBuildTdkDeviceDriver: operationsLog", "False self.doBuildTdkDeviceDriver = False self.doTdkSetup = False self.doTdkDeviceDriverSetup = False self.doBuildTdk_Debug = False", "operationsLog += \"Building TDK: YES\\n\" else: operationsLog += \"Building TDK: NO\\n\" if self.doBuildTdk_Debug:", "self.launch_tdk_full() self.launch_tdkdevicedriver() self.launch_tdk_setup() self.launch_tdkdevicedriver_setup() QMessageBox.about(self, \"List of Operations\", \"Operations have been finished!\") pass", "self.chkBuildOverlord.isChecked(): if not 
self.doBuildTdkDesign: self.chkBuildTdkDesign.toggle() self.doBuildTdkDesign = True self.doBuildOverlord = True else: self.doBuildOverlord", "try: if not os.path.exists(FTCMfolderToBeChecked): print('Directory %s does not exist. It will be created.'%", "created.'% (FTCMfolderToBeChecked)) if not os.path.exists(TDKfolderToBeChecked): print('Directory %s does not exist. It will be", "= self.edtVersion.text() operationsLog = \"\" self.getOfficialVersionType() self.getOperationsList() if self.isOfficialVersion: operationsLog += 'Building an", "SETUP: NO\\n\\n\" if self.doTdkDeviceDriverSetup: operationsLog += \"Building TDK DEVICE DRIVER SETUP: YES\\n\\n\" else:", "TDK DEVICE DRIVER in DEBUG: NO\\n\\n\" if self.doTdkSetup: operationsLog += \"Building OVERLORD SETUP:", "will be executed\") self.mainBuilder = MainBuilder( self.isOfficialVersion, self.doBuildTdkDesign, self.doBuildOverlord, self.doBuildConfiguratore, self.doBuildTdk, self.doBuildTdkDeviceDriver, self.doTdkSetup,", "+= \"Building TDK DEVICE DRIVER: YES\\n\" else: operationsLog += \"Building TDK DEVICE DRIVER:", "OFFICIAL VERSION of OVERLORD, version number is ['+ self.versionNumber +']\\n\\n' else: operationsLog +=", "True except MyException as e: QMessageBox. about(self, \"CheckDirectoryExistence - Exception\", str(e)) return False", "TDK in DESIGN: YES\\n\\n\" else: operationsLog += \"Building TDKin DESIGN: NO\\n\\n\" if self.doBuildOverlord:" ]
[ "int, image: bytes): kuint32_t_size = 4 MakeBootable(vector_table_loc, image) image_list = list(image) for byte", "response_code not in (NXPReturnCodes[\"CMD_SUCCESS\"], NXPReturnCodes[\"COMPARE_ERROR\"]): RaiseReturnCodeError(response_code, \"Compare\") return response_code == NXPReturnCodes[\"CMD_SUCCESS\"] def ReadUID(self):", "return image def FillDataToFitSector(data: bytes, size: int) -> bytes: if len(data) != size:", "bool = True): ''' ISP echos host when enabled ''' if on: command", "ram_address, num_bytes)) RaiseReturnCodeError(response_code, \"Copy RAM To Flash\") #sleep(.2) def Go(self, address: int, thumb_mode:", "except TimeoutError: response_code = self.WriteCommand(\"S %d %d\"%(address, num_bytes)) RaiseReturnCodeError(response_code, \"Read CRC\") return int(self.ReadLine().strip())", "self.SyncString.strip() in frame_in.strip(): synced = True break except TimeoutError: pass if not synced:", "NXPReturnCodes[\"CMD_SUCCESS\"]: raise UserWarning( \"Return Code Failure in {} {} {}\".format(call_name, GetErrorCodeName(code), code)) def", "0x7, \"SECTOR_NOT_BLANK\" : 0x8, \"SECTOR_NOT_PREPARED_FOR_WRITE_OPERATION\" : 0x9, \"COMPARE_ERROR\" : 0xa, \"BUSY\" : 0xb,", "(NXPReturnCodes[\"CMD_SUCCESS\"], NXPReturnCodes[\"COMPARE_ERROR\"]): RaiseReturnCodeError(response_code, \"Compare\") return response_code == NXPReturnCodes[\"CMD_SUCCESS\"] def ReadUID(self): response_code = self.WriteCommand(\"N\")", "self.SectorCount - 1): return sector return self.SectorCount - 1 def ReadSector(self, sector: int)", "bytes, size: int) -> bytes: if len(data) != size: data += bytes([0xff] *(size", "except TimeoutError: return @timeout(4) def ReadMemory(self, start: int, num_bytes: int): assert num_bytes%self.kWordSize ==", "= self.ReadCRC(flash_address, num_bytes=len(data)) assert flash_crc == data_crc assert self.MemoryLocationsEqual(flash_address, ram_address, sector_size_bytes) def WriteSector(self,", "int, call_name: str) -> None: if int(code) != 
NXPReturnCodes[\"CMD_SUCCESS\"]: raise UserWarning( \"Return Code", "\"INVALID_STOP_BIT\" : 0x12, \"CODE_READ_PROTECTION_ENABLED\" : 0x13, \"Unused 1\" : 0x14, \"USER_CODE_CHECKSUM\" : 0x15,", "} def GetErrorCodeName(code: int) -> str: code = int(code) for item in NXPReturnCodes.items():", ": 0x18, \"Unused 3\" : 0x19, \"Unused 4\" : 0x1a, \"FLASH_NO_CLOCK\" : 0x1b,", "response_code = self.WriteCommand(\"G %d %s\"%(address, mode)) RaiseReturnCodeError(response_code, \"Go\") def EraseSector(self, start: int, end:", "transferr data = [] while self.data_buffer_in: ch = self.data_buffer_in.popleft() data.append(ch) if len(data) !=", "Sectors\") def CopyRAMToFlash(self, flash_address: int, ram_address: int, num_bytes: int): assert self.RamRangeLegal(ram_address, num_bytes) assert", "= 0x104c11db6 crc = Crc(width=32, poly=polynomial, reflect_in=True, xor_in=(1<<32)-1, reflect_out=True, xor_out=0x00) crc_calc = crc.bit_by_bit(frame)", "= prog print(\"Program Length:\", len(prog)) sector_count = int(math.ceil(len(prog)/sector_bytes)) assert start_sector + sector_count <=", "0xe, \"CMD_LOCKED\" : 0xf, \"INVALID_CODE\" : 0x10, \"INVALID_BAUD_RATE\" : 0x11, \"INVALID_STOP_BIT\" : 0x12,", "self.FlashRange[1]) print(\"Flash Signature: %s\"%chip_flash_sig) print(\"Programming Complete.\") def FindFirstBlankSector(self) -> int: for sector in", "\"SECTOR_NOT_PREPARED_FOR_WRITE_OPERATION\" : 0x9, \"COMPARE_ERROR\" : 0xa, \"BUSY\" : 0xb, \"PARAM_ERROR\" : 0xc, \"ADDR_ERROR\"", "if code == item[1]: return item[0] return \"Not Found\" def RaiseReturnCodeError(code: int, call_name:", "Signature\") sig = [] for i in range(4): sig.append(self.ReadLine().strip()) return sig def ReadWriteFAIM(self):", "#image = RemoveBootableCheckSum(self.kCheckSumLocation, prog) image = MakeBootable(self.kCheckSumLocation, prog) print(\"Program Length:\", len(prog)) sector_count =", "pass if response_code not in (NXPReturnCodes[\"CMD_SUCCESS\"], NXPReturnCodes[\"SECTOR_NOT_BLANK\"]): 
RaiseReturnCodeError(response_code, \"Blank Check Sectors\") return response_code", "return (1<<32) - (csum % (1<<32)) def Crc32(frame) -> int: #CRC32 polynomial =", "{} {}\".format(baud_rate, stop_bits)) RaiseReturnCodeError(response_code, \"Set Baudrate\") def Echo(self, on: bool = True): '''", "length) return self.FlashAddressLegal(address) and self.FlashAddressLegal(address + length - 1) and length <= self.FlashRange[1]", "-> str: code = int(code) for item in NXPReturnCodes.items(): if code == item[1]:", "return code with no response ''' code = self.GetReturnCode() RaiseReturnCodeError(code, call_name) def Write(self,", "= FillDataToFitSector(data, sector_bytes) self.WriteFlashSector(sector, filled_data) sleep(self.kSleepTime) #assert self.ReadSector(sector) == data_chunk def WriteBinaryToFlash(self, image_file:", "0x3, \"SRC_ADDR_NOT_MAPPED\" : 0x4, \"DST_ADDR_NOT_MAPPED\" : 0x5, \"COUNT_ERROR\" : 0x6, \"INVALID_SECTOR/INVALID_PAGE\" : 0x7,", "'rb') as f: prog = f.read() image = prog print(\"Program Length:\", len(prog)) sector_count", "1 def ReadSector(self, sector: int) -> bytes: sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize ==", "''' LPC84x sends a 0x1a first for some reason. 
Also the boot version", "ClearSerialConnection(self): self.Write(bytes(self.kNewLine, encoding=\"utf-8\")) self.ClearBuffer() self.Flush() self.Read() self.ClearBuffer() self.Flush() for _ in range(2): try:", "''' mode = \"\" if thumb_mode: mode = 'T' response_code = self.WriteCommand(\"G %d", "skip try: self.MemoryLocationsEqual(flash_address, ram_address, sector_size_bytes) print(\"Flash already equal to RAM, skipping write\") return", "timeout_decorator.timeout_decorator import TimeoutError from pycrc.algorithms import Crc from .ISPChip import ISPChip NXPReturnCodes =", "= self.kPageSizeBytes*self.SectorSizePages flash_address = self.FlashRange[0] + sector*sector_size_bytes print(\"\\nWriting Sector: %d\\nFlash Address: %x\\nRAM Address:", "intvecs_list = list(intvecs[:vector_table_size]) intvecs_list[vector_table_loc] = 0 # clear csum value csum = CalculateCheckSum(intvecs_list)", "sends a 0x1a first for some reason. Also the boot version seems to", "timeout_decorator import timeout from timeout_decorator.timeout_decorator import TimeoutError from pycrc.algorithms import Crc from .ISPChip", "#assert self.ReadSector(sector) == data_chunk def WriteBinaryToFlash(self, image_file: str, start_sector: int): sector_bytes = self.SectorSizePages*self.kPageSizeBytes", "%d\"%(start, end)) RaiseReturnCodeError(response_code, \"Erase Sectors\") def ErasePages(self, start: int, end: int): response_code =", "the memory map are equal ''' def MemoryLocationsEqual(self, address1: int, address2: int, num_bytes:", "print(\"Flash Signature: %s\"%chip_flash_sig) print(\"Programming Complete.\") def WriteImage(self, image_file: str): sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert", "reflect_out=True, xor_out=0x00) crc_calc = crc.bit_by_bit(frame) return crc_calc def GetCheckSumedVectorTable(vector_table_loc: int, orig_image: bytes) ->", "print(\"Prep Sector\") sector_blank = self.CheckSectorsBlank(sector, sector) assert sector_blank sleep(self.kSleepTime) 
self.PrepSectorsForWrite(sector, sector) sleep(self.kSleepTime) print(\"Write", "RaiseReturnCodeError(code, call_name) def Write(self, string : bytes) -> None: #print(out) assert(type(string) is bytes)", "CheckSectorsBlank(self, start: int, end: int) -> bool: assert start <= end response_code =", "SetBaudRate(self, baud_rate: int, stop_bits: int = 1): ''' Baud Depends of FAIM config,", "specified spot ''' mode = \"\" if thumb_mode: mode = 'T' response_code =", "inserting a checksum in the correct place vector_table_size = 8 kuint32_t_size = 4", "Go ''' self.ClearBuffer() response_code = self.WriteCommand(\"U 23130\") RaiseReturnCodeError(response_code, \"Unlock\") def SetBaudRate(self, baud_rate: int,", ": 0x10, \"INVALID_BAUD_RATE\" : 0x11, \"INVALID_STOP_BIT\" : 0x12, \"CODE_READ_PROTECTION_ENABLED\" : 0x13, \"Unused 1\"", "return @timeout(4) def ReadMemory(self, start: int, num_bytes: int): assert num_bytes%self.kWordSize == 0 assert", "print(\"Flash already equal to RAM, skipping write\") return except: pass print(\"Prep Sector\") self.PrepSectorsForWrite(sector,", "self.WriteSector(sector, data_chunk) sleep(1) chip_flash_sig = self.ReadFlashSig(self.FlashRange[0], self.FlashRange[1]) print(\"Flash Signature: %s\"%chip_flash_sig) print(\"Programming Complete.\") def", "Make byte array into list of little endian 32 bit words intvecs =", "1\" : 0x14, \"USER_CODE_CHECKSUM\" : 0x15, \"Unused 2\" : 0x16, \"EFRO_NO_POWER\" : 0x17,", "orig_image[len(vector_table_bytes):] return image def FillDataToFitSector(data: bytes, size: int) -> bytes: if len(data) !=", "if len(data) != size: data += bytes([0xff] *(size - len(data))) return data class", "int, end: int): response_code = self.WriteCommand(\"X %d %d\"%(start, end)) RaiseReturnCodeError(response_code, \"Erase Pages\") def", "sector_count)): print(\"\\nWriting Sector %d\"%sector) data_chunk = image[(sector-start_sector) * sector_bytes : (sector - start_sector", "8 #StopBits = 1 SyncString = 
\"Synchronized\"+ISPChip.kNewLine SyncStringBytes = bytes(SyncString, encoding=\"utf-8\") SyncVerified =", "if two sections in the memory map are equal ''' def MemoryLocationsEqual(self, address1:", "self.RamRangeLegal(start, num_bytes) print(\"ReadMemory\") #self.Flush() #self.Read() #self.ClearBuffer() #self.Flush() print(\"R %d %d\"%(start, num_bytes)) response_code =", "int): self.Write((bytes(\"%d\"%frequency_khz + self.kNewLine, encoding=\"utf-8\"))) verified = False for i in range(3): try:", "start_sector + sector_count)): print(\"\\nWriting Sector %d\"%sector) data_chunk = image[(sector-start_sector) * sector_bytes : (sector", "data_chunk = image[sector * sector_bytes : (sector + 1) * sector_bytes] self.WriteSector(sector, data_chunk)", "self.WriteSector(0, bytes([0xde]*sector_bytes)) with open(image_file, 'rb') as f: prog = f.read() #image = RemoveBootableCheckSum(self.kCheckSumLocation,", "sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize == 0 return self.ReadMemory(sector*sector_bytes, sector_bytes) def ReadImage(self, image_file:", "\"Write to RAM\") self.Write(data)#Stream data after confirmation #self.Write(\"OK\"+self.kNewLine) try: print(self.ReadLine()) except TimeoutError: return", ": 0x17, \"FLASH_NO_POWER\" : 0x18, \"Unused 3\" : 0x19, \"Unused 4\" : 0x1a,", "num_bytes)) RaiseReturnCodeError(response_code, \"Read CRC\") return int(self.ReadLine().strip()) def ReadFlashSig(self, start: int, end: int, wait_states:", "self.ReadFlashSig(self.FlashRange[0], self.FlashRange[1]) print(\"Flash Signature: %s\"%chip_flash_sig) print(\"Programming Complete.\") def WriteImage(self, image_file: str): sector_bytes =", "amount of data\") if isinstance(type(data), data_read): raise TypeError(\"data written and data read are", "for entry in frame: csum += entry return (1<<32) - (csum % (1<<32))", "self.ReadLine() if self.SyncString.strip() in frame_in.strip(): synced = True break except TimeoutError: pass if", "sector) 
sleep(self.kSleepTime) print(\"Erase Sector\") self.EraseSector(sector, sector) sleep(self.kSleepTime) assert self.CheckSectorsBlank(sector, sector) sleep(self.kSleepTime) print(\"Prep Sector\")", "\"Erase Pages\") def CheckSectorsBlank(self, start: int, end: int) -> bool: assert start <=", "assert start <= end response_code = self.WriteCommand(\"I %d %d\"%(start, end)) try: self.ReadLine() response", "= self.WriteCommand(\"O\") RaiseReturnCodeError(response_code, \"Read Write FAIM\") def ResetSerialConnection(self): self.Flush() self.Write(bytes(self.kNewLine, encoding=\"utf-8\")) try: self.ReadLine()", "TimeoutError: pass verified = False for i in range(3): try: frame_in = self.ReadLine()#Should", "str: code = int(code) for item in NXPReturnCodes.items(): if code == item[1]: return", "int: try: response_code = self.WriteCommand(\"S %d %d\"%(address, num_bytes)) except TimeoutError: response_code = self.WriteCommand(\"S", "def PrepSectorsForWrite(self, start: int, end: int): try: response_code = self.WriteCommand(\"P %d %d\"%(start, end))", "%d\"%(start, end, wait_states, mode)) RaiseReturnCodeError(response_code, \"Read Flash Signature\") sig = [] for i", "thumb_mode: bool = False): ''' Start executing code at the specified spot '''", "%d\"%(start, num_bytes)) response_code = self.WriteCommand(\"R %d %d\"%(start, num_bytes)) RaiseReturnCodeError(response_code, \"Read Memory\") while len(self.data_buffer_in)", "assert sector_count <= self.SectorCount for sector in reversed(range(sector_count)): print(\"\\nWriting Sector %d\"%sector) data_chunk =", "synced = False self.ClearSerialConnection() self.Flush() for i in range(5): self.Write(bytes('?'*15, encoding=\"utf-8\")) #self.Write('?' +", "range(5): self.Write(bytes('?'*15, encoding=\"utf-8\")) #self.Write('?' 
+ self.kNewLine) try: frame_in = self.ReadLine() if self.SyncString.strip() in", "data class NXPChip(ISPChip): kWordSize = 4 kPageSizeBytes = 64 SectorSizePages = 16 MaxByteTransfer", "= self.WriteCommand(\"B {} {}\".format(baud_rate, stop_bits)) RaiseReturnCodeError(response_code, \"Set Baudrate\") def Echo(self, on: bool =", "return address >= self.RAMRange[0] and address <= self.RAMRange[1] def RamRangeLegal(self, address, length): return", "- self.FlashRange[0] and address%self.kPageSizeBytes == 0 def RamAddressLegal(self, address): return address >= self.RAMRange[0]", "except Exception: ram_crc = self.ReadCRC(ram_address, num_bytes=len(data)) while ram_crc != data_crc: sleep(self.kSleepTime) self.WriteToRam(ram_address, data)", "self.Write(\"W %d %d\"%(start + i, kWordSize)) # self.AssertReturnCode(\"Write to RAM\")#get confirmation # self.Write(data[i:i+kWordSize])#Stream", "sector in reversed(range(sector_count)): print(\"\\nWriting Sector %d\"%sector) data_chunk = image[sector * sector_bytes : (sector", "0x15, \"Unused 2\" : 0x16, \"EFRO_NO_POWER\" : 0x17, \"FLASH_NO_POWER\" : 0x18, \"Unused 3\"", "make this a valid image by inserting a checksum in the correct place", "RaiseReturnCodeError(response_code, \"Read Flash Signature\") sig = [] for i in range(4): sig.append(self.ReadLine().strip()) return", "int): response_code = self.WriteCommand(\"X %d %d\"%(start, end)) RaiseReturnCodeError(response_code, \"Erase Pages\") def CheckSectorsBlank(self, start:", "#self.Write('?' 
+ self.kNewLine) try: frame_in = self.ReadLine() if self.SyncString.strip() in frame_in.strip(): synced =", "return except: pass print(\"Prep Sector\") self.PrepSectorsForWrite(sector, sector) sleep(self.kSleepTime) print(\"Erase Sector\") self.EraseSector(sector, sector) sleep(self.kSleepTime)", "the docs say ''' response_code = self.WriteCommand(\"K\") RaiseReturnCodeError(response_code, \"Read Bootcode Version\") minor =", "== data_crc assert self.MemoryLocationsEqual(flash_address, ram_address, sector_size_bytes) def WriteSector(self, sector: int, data: bytes): #assert", "else: command = \"A 0\" response_code = self.WriteCommand(command) RaiseReturnCodeError(response_code, \"Set Echo\") def WriteToRam(self,", "-> str: assert start < end assert(self.FlashAddressLegal(start) and self.FlashAddressLegal(end)) response_code = self.WriteCommand(\"Z %d", "for item in NXPReturnCodes.items(): if code == item[1]: return item[0] return \"Not Found\"", "def FlashRangeLegal(self, address, length): print(self.FlashRange, address, length) return self.FlashAddressLegal(address) and self.FlashAddressLegal(address + length", "WriteToRam(self, start: int, data: bytes): assert len(data)%self.kWordSize == 0 assert self.RamRangeLegal(start, len(data)) print(\"Write", "self.ReadLine() except TimeoutError: pass def InitConnection(self): self.ResetSerialConnection() try: try: self.SyncConnection() self.SetCrystalFrequency(self.CrystalFrequency) except (UserWarning,", "self.Write(self.SyncStringBytes)#echo SyncString try: frame_in = self.ReadLine()#discard echo except TimeoutError: pass verified = False", "num_bytes)) RaiseReturnCodeError(response_code, \"Copy RAM To Flash\") #sleep(.2) def Go(self, address: int, thumb_mode: bool", "12000#khz == 30MHz self.SectorCount = 0 self.RAMSize = 0 self.RAMRange = [0, 0]", "sector_blank sleep(self.kSleepTime) self.PrepSectorsForWrite(sector, sector) sleep(self.kSleepTime) print(\"Write to Flash\") self.CopyRAMToFlash(flash_address, ram_address, 
sector_size_bytes) sleep(self.kSleepTime) flash_crc", "self.CopyRAMToFlash(flash_address, ram_address, sector_size_bytes) sleep(self.kSleepTime) flash_crc = self.ReadCRC(flash_address, num_bytes=len(data)) assert flash_crc == data_crc assert", "already equal to RAM, skipping write\") return except: pass print(\"Prep Sector\") self.PrepSectorsForWrite(sector, sector)", "Memory\") while len(self.data_buffer_in) < (num_bytes): self.Read() # Command success is sent at the", "Sectors Blank response\", response) except TimeoutError: pass if response_code not in (NXPReturnCodes[\"CMD_SUCCESS\"], NXPReturnCodes[\"SECTOR_NOT_BLANK\"]):", "self.ReadLine().strip(), self.ReadLine().strip(), self.ReadLine().strip(), self.ReadLine().strip()] return \" \".join([\"0x%08x\"%int(uid) for uid in uuids]) def ReadCRC(self,", "FillDataToFitSector(data: bytes, size: int) -> bytes: if len(data) != size: data += bytes([0xff]", "+ length - 1) and length <= self.FlashRange[1] - self.FlashRange[0] and address%self.kPageSizeBytes ==", "1) * sector_bytes] self.WriteSector(sector, data_chunk) sleep(1) chip_flash_sig = self.ReadFlashSig(self.FlashRange[0], self.FlashRange[1]) print(\"Flash Signature: %s\"%chip_flash_sig)", "if self.SyncString.strip() in frame_in.strip(): synced = True break except TimeoutError: pass if not", "''' Baud Depends of FAIM config, stopbit is 1 or 2 ''' response_code", "return \" \".join([\"0x%08x\"%int(uid) for uid in uuids]) def ReadCRC(self, address: int, num_bytes: int)", "address%self.kPageSizeBytes == 0 def RamAddressLegal(self, address): return address >= self.RAMRange[0] and address <=", "to running ISP\") self.ClearSerialConnection() self.Echo(False) try: self.ReadLine() self.Flush() self.ClearBuffer() except TimeoutError: pass uid", ": 0x2, \"DST_ADDR_ERROR\" : 0x3, \"SRC_ADDR_NOT_MAPPED\" : 0x4, \"DST_ADDR_NOT_MAPPED\" : 0x5, \"COUNT_ERROR\" :", "# Make byte array into list of little endian 32 bit words intvecs", "* sector_bytes : (sector - start_sector + 1) 
* sector_bytes] self.WriteSector(sector, data_chunk) sleep(1)", "= bytes(SyncString, encoding=\"utf-8\") SyncVerified = bytes(\"OK\"+ISPChip.kNewLine, encoding=\"utf-8\") ReturnCodes = NXPReturnCodes CRCLocation = 0x000002fc", "self.FlashAddressLegal(end)) response_code = self.WriteCommand(\"Z %d %d %d %d\"%(start, end, wait_states, mode)) RaiseReturnCodeError(response_code, \"Read", "< (num_bytes): self.Read() # Command success is sent at the end of the", "Failure\") #self.Flush() self.Write(self.SyncStringBytes)#echo SyncString try: frame_in = self.ReadLine()#discard echo except TimeoutError: pass verified", "= self.WriteCommand(\"I %d %d\"%(start, end)) try: self.ReadLine() response = self.ReadLine().strip() print(\"Check Sectors Blank", "{}\".format(baud_rate, stop_bits)) RaiseReturnCodeError(response_code, \"Set Baudrate\") def Echo(self, on: bool = True): ''' ISP", "0x5, \"COUNT_ERROR\" : 0x6, \"INVALID_SECTOR/INVALID_PAGE\" : 0x7, \"SECTOR_NOT_BLANK\" : 0x8, \"SECTOR_NOT_PREPARED_FOR_WRITE_OPERATION\" : 0x9,", "0x11, \"INVALID_STOP_BIT\" : 0x12, \"CODE_READ_PROTECTION_ENABLED\" : 0x13, \"Unused 1\" : 0x14, \"USER_CODE_CHECKSUM\" :", "1024 StatusRespLength = len(ISPChip.kNewLine) + 1 #Parity = None #DataBits = 8 #StopBits", "num_bytes: print(data, len(data), num_bytes) assert len(data) == num_bytes return bytes(data) def PrepSectorsForWrite(self, start:", "assert num_bytes%self.kWordSize == 0 assert self.RamRangeLegal(start, num_bytes) print(\"ReadMemory\") #self.Flush() #self.Read() #self.ClearBuffer() #self.Flush() print(\"R", "data = [] while self.data_buffer_in: ch = self.data_buffer_in.popleft() data.append(ch) if len(data) != num_bytes:", "try: ram_crc = self.ReadCRC(ram_address, num_bytes=len(data)) except Exception: ram_crc = self.ReadCRC(ram_address, num_bytes=len(data)) while ram_crc", "#0x0000001c def FlashAddressLegal(self, address): return address >= self.FlashRange[0] and address <= self.FlashRange[1]; def", "try: self.SyncConnection() 
self.SetCrystalFrequency(self.CrystalFrequency) except (UserWarning, TimeoutError) as w: print(\"Sync Failed\", w) print(\"Connect to", "assert len(data) == num_bytes return bytes(data) def PrepSectorsForWrite(self, start: int, end: int): try:", "def ReadImage(self, image_file: str): blank_sector = self.FindFirstBlankSector() with open(image_file, 'wb') as f: for", "struct from timeout_decorator import timeout from timeout_decorator.timeout_decorator import TimeoutError from pycrc.algorithms import Crc", "len(prog)) sector_count = int(math.ceil(len(prog)/sector_bytes)) assert sector_count <= self.SectorCount for sector in reversed(range(sector_count)): print(\"\\nWriting", "#self.Flush() #self.Read() #self.ClearBuffer() #self.Flush() print(\"R %d %d\"%(start, num_bytes)) response_code = self.WriteCommand(\"R %d %d\"%(start,", "#self.Read() #self.ClearBuffer() #self.Flush() print(\"R %d %d\"%(start, num_bytes)) response_code = self.WriteCommand(\"R %d %d\"%(start, num_bytes))", "# Check to see if sector is already equal to RAM, if so", "Unlock(self): ''' Enables Flash Write, Erase, & Go ''' self.ClearBuffer() response_code = self.WriteCommand(\"U", "<= self.SectorCount for sector in reversed(range(sector_count)): print(\"\\nWriting Sector %d\"%sector) data_chunk = image[sector *", "0 assert self.RamRangeLegal(start, len(data)) print(\"Write to RAM %d bytes\"%len(data)) #while i < len(data):", "data += bytes([0xff] *(size - len(data))) return data class NXPChip(ISPChip): kWordSize = 4", "Write(self, string : bytes) -> None: #print(out) assert(type(string) is bytes) self.WriteSerial(string) #self.WriteSerial(bytes(self.kNewLine, encoding", "encoding=\"utf-8\")) return self.GetReturnCode() def Unlock(self): ''' Enables Flash Write, Erase, & Go '''", "def Echo(self, on: bool = True): ''' ISP echos host when enabled '''", "int(math.ceil(len(prog)/sector_bytes)) assert sector_count <= self.SectorCount for sector in reversed(range(sector_count)): print(\"\\nWriting Sector 
%d\"%sector) data_chunk", "then Major not like the docs say ''' response_code = self.WriteCommand(\"K\") RaiseReturnCodeError(response_code, \"Read", "orig_image[:vector_table_size * kuint32_t_size]) # calculate the checksum over the interrupt vectors intvecs_list =", "break # Check to see if sector is already equal to RAM, if", "0x16, \"EFRO_NO_POWER\" : 0x17, \"FLASH_NO_POWER\" : 0x18, \"Unused 3\" : 0x19, \"Unused 4\"", "= self.ReadLine().strip() return int(resp) except ValueError: pass return self.ReturnCodes[\"NoStatusResponse\"] def AssertReturnCode(self, call_name: str)", "ISP echos host when enabled ''' if on: command = \"A 1\" else:", "in frame_in.strip(): synced = True break except TimeoutError: pass if not synced: #Check", "MaxByteTransfer = 1024 StatusRespLength = len(ISPChip.kNewLine) + 1 #Parity = None #DataBits =", ": 0xd, \"ADDR_NOT_MAPPED\" : 0xe, \"CMD_LOCKED\" : 0xf, \"INVALID_CODE\" : 0x10, \"INVALID_BAUD_RATE\" :", "len(data) != size: data += bytes([0xff] *(size - len(data))) return data class NXPChip(ISPChip):", "%d %d\"%(start, len(data))) RaiseReturnCodeError(response_code, \"Write to RAM\") self.Write(data)#Stream data after confirmation #self.Write(\"OK\"+self.kNewLine) try:", "sector_count = int(math.ceil(len(prog)/sector_bytes)) assert start_sector + sector_count <= self.SectorCount self.Unlock() for sector in", "# Command success is sent at the end of the transferr data =", "len(data)) print(\"Write to RAM %d bytes\"%len(data)) #while i < len(data): # self.Write(\"W %d", "data) sleep(self.kSleepTime) ram_crc = self.ReadCRC(ram_address, num_bytes=len(data)) if data_crc != ram_crc: print(\"CRC Check failed\",", "i in range(3): try: frame_in = self.ReadLine()#Should be OK\\r\\n if self.SyncVerified.strip() in frame_in:", "int(resp) except ValueError: pass return self.ReturnCodes[\"NoStatusResponse\"] def AssertReturnCode(self, call_name: str) -> None: '''", "= int(math.ceil(len(prog)/sector_bytes)) assert sector_count <= self.SectorCount 
for sector in reversed(range(sector_count)): print(\"\\nWriting Sector %d\"%sector)", ": 0x11, \"INVALID_STOP_BIT\" : 0x12, \"CODE_READ_PROTECTION_ENABLED\" : 0x13, \"Unused 1\" : 0x14, \"USER_CODE_CHECKSUM\"", "print(\"Check Sectors Blank response\", response) except TimeoutError: pass if response_code not in (NXPReturnCodes[\"CMD_SUCCESS\"],", "\"Return Code Failure in {} {} {}\".format(call_name, GetErrorCodeName(code), code)) def RemoveBootableCheckSum(vector_table_loc: int, image:", "int, data: bytes): ram_address = self.RAMStartWrite sector_size_bytes = self.kPageSizeBytes*self.SectorSizePages flash_address = self.FlashRange[0] +", "0x0, \"INVALID_COMMAND\" : 0x1, \"SRC_ADDR_ERROR\" : 0x2, \"DST_ADDR_ERROR\" : 0x3, \"SRC_ADDR_NOT_MAPPED\" : 0x4,", "xor_in=(1<<32)-1, reflect_out=True, xor_out=0x00) crc_calc = crc.bit_by_bit(frame) return crc_calc def GetCheckSumedVectorTable(vector_table_loc: int, orig_image: bytes)", "%s\"%chip_flash_sig) print(\"Programming Complete.\") def WriteImage(self, image_file: str): sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize ==", "RaiseReturnCodeError(code: int, call_name: str) -> None: if int(code) != NXPReturnCodes[\"CMD_SUCCESS\"]: raise UserWarning( \"Return", "the interrupt vectors intvecs_list = list(intvecs[:vector_table_size]) intvecs_list[vector_table_loc] = 0 # clear csum value", "import zlib from time import sleep import struct from timeout_decorator import timeout from", "= NXPReturnCodes CRCLocation = 0x000002fc CRCValues = { \"NO_ISP\": 0x4e697370, \"CRP1\" : 0x12345678,", "not verified: raise UserWarning(\"Verification Failure\") print(\"Syncronization Successful\") def ClearSerialConnection(self): self.Write(bytes(self.kNewLine, encoding=\"utf-8\")) self.ClearBuffer() self.Flush()", "return data class NXPChip(ISPChip): kWordSize = 4 kPageSizeBytes = 64 SectorSizePages = 16", ": 0x0, \"INVALID_COMMAND\" : 0x1, \"SRC_ADDR_ERROR\" : 0x2, \"DST_ADDR_ERROR\" : 0x3, 
\"SRC_ADDR_NOT_MAPPED\" :", "self.data_buffer_in: ch = self.data_buffer_in.popleft() data.append(ch) if len(data) != num_bytes: print(data, len(data), num_bytes) assert", "SyncVerified = bytes(\"OK\"+ISPChip.kNewLine, encoding=\"utf-8\") ReturnCodes = NXPReturnCodes CRCLocation = 0x000002fc CRCValues = {", "Sectors\") def ErasePages(self, start: int, end: int): response_code = self.WriteCommand(\"X %d %d\"%(start, end))", "item in NXPReturnCodes.items(): if code == item[1]: return item[0] return \"Not Found\" def", "#Check for SyncString raise UserWarning(\"Syncronization Failure\") #self.Flush() self.Write(self.SyncStringBytes)#echo SyncString try: frame_in = self.ReadLine()#discard", "sleep(self.kSleepTime) print(\"Prep Sector\") sector_blank = self.CheckSectorsBlank(sector, sector) assert sector_blank sleep(self.kSleepTime) self.PrepSectorsForWrite(sector, sector) sleep(self.kSleepTime)", "map are equal ''' def MemoryLocationsEqual(self, address1: int, address2: int, num_bytes: int): self.Write(bytes((\"M", "a checksum in the correct place vector_table_size = 8 kuint32_t_size = 4 #", "int(response[0]) if response_code not in (NXPReturnCodes[\"CMD_SUCCESS\"], NXPReturnCodes[\"COMPARE_ERROR\"]): RaiseReturnCodeError(response_code, \"Compare\") return response_code == NXPReturnCodes[\"CMD_SUCCESS\"]", "#Parity = None #DataBits = 8 #StopBits = 1 SyncString = \"Synchronized\"+ISPChip.kNewLine SyncStringBytes", "return \"%d.%d\"%(int(major), int(minor)) ''' Checks to see if two sections in the memory", "= 0x000002fc CRCValues = { \"NO_ISP\": 0x4e697370, \"CRP1\" : 0x12345678, \"CRP2\" : 0x87654321,", "self.WriteCommand(\"X %d %d\"%(start, end)) RaiseReturnCodeError(response_code, \"Erase Pages\") def CheckSectorsBlank(self, start: int, end: int)", "print(\"R %d %d\"%(start, num_bytes)) response_code = self.WriteCommand(\"R %d %d\"%(start, num_bytes)) RaiseReturnCodeError(response_code, \"Read Memory\")", "failed\", data_crc, ram_crc) else: break # Check to see if 
sector is already", "assert sector_blank sleep(self.kSleepTime) self.PrepSectorsForWrite(sector, sector) sleep(self.kSleepTime) print(\"Write to Flash\") self.CopyRAMToFlash(flash_address, ram_address, sector_size_bytes) sleep(self.kSleepTime)", "#self.WriteSerial(bytes(self.kNewLine, encoding = \"utf-8\")) ''' Takes the command string, return the response code", "ram_address)) assert len(data) == sector_size_bytes #data += bytes(sector_size_bytes - len(data)) data_crc = zlib.crc32(data,", "intvecs = struct.unpack(\"<%dI\"%vector_table_size, orig_image[:vector_table_size * kuint32_t_size]) # calculate the checksum over the interrupt", "verified = True break except TimeoutError: pass if not verified: raise UserWarning(\"Verification Failure\")", "\"Read Memory\") while len(self.data_buffer_in) < (num_bytes): self.Read() # Command success is sent at", "bit words intvecs = struct.unpack(\"<%dI\"%vector_table_size, orig_image[:vector_table_size * kuint32_t_size]) # calculate the checksum over", "= self.ReadLine().strip() print(\"Check Sectors Blank response\", response) except TimeoutError: pass if response_code not", "length): return self.RamAddressLegal(address) and self.RamAddressLegal(address + length) and length <= self.RAMRange[1] - self.RAMRange[0]", "address%self.kWordSize == 0 def GetReturnCode(self) -> int: for _ in range(10): #sleep(.1) try:", "''' if on: command = \"A 1\" else: command = \"A 0\" response_code", "%d\"%(start, end)) try: self.ReadLine() response = self.ReadLine().strip() print(\"Check Sectors Blank response\", response) except", "enabled ''' if on: command = \"A 1\" else: command = \"A 0\"", "int: for _ in range(10): #sleep(.1) try: resp = self.ReadLine().strip() return int(resp) except", "e: print(e, type(e)) raise def SyncConnection(self): synced = False self.ClearSerialConnection() self.Flush() for i", "self.RAMRange[1] def RamRangeLegal(self, address, length): return self.RamAddressLegal(address) and self.RamAddressLegal(address + length) 
and length", "not like the docs say ''' response_code = self.WriteCommand(\"K\") RaiseReturnCodeError(response_code, \"Read Bootcode Version\")", "= self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize == 0 with open(image_file, 'rb') as f: prog =", "Code Failure in {} {} {}\".format(call_name, GetErrorCodeName(code), code)) def RemoveBootableCheckSum(vector_table_loc: int, image: bytes):", "\"CRP3\" : 0x43218765, } kSleepTime = 1 def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs)", "> 0) filled_data = FillDataToFitSector(data, sector_bytes) self.WriteFlashSector(sector, filled_data) sleep(self.kSleepTime) #assert self.ReadSector(sector) == data_chunk", "%d\"%(flash_address, ram_address, num_bytes)) RaiseReturnCodeError(response_code, \"Copy RAM To Flash\") #sleep(.2) def Go(self, address: int,", "num_bytes) + self.kNewLine), encoding=\"utf-8\")) response = self.ReadLine() response_code = int(response[0]) if response_code not", "sent at the end of the transferr data = [] while self.data_buffer_in: ch", "* kuint32_t_size]) # calculate the checksum over the interrupt vectors intvecs_list = list(intvecs[:vector_table_size])", "\"Set Baudrate\") def Echo(self, on: bool = True): ''' ISP echos host when", "data_chunk def WriteBinaryToFlash(self, image_file: str, start_sector: int): sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize ==", "SectorSizePages = 16 MaxByteTransfer = 1024 StatusRespLength = len(ISPChip.kNewLine) + 1 #Parity =", "not synced: #Check for SyncString raise UserWarning(\"Syncronization Failure\") #self.Flush() self.Write(self.SyncStringBytes)#echo SyncString try: frame_in", "it to what was written ''' data_read = self.ReadMemory(flash_address, len(data)) if len(data) !=", "print(\"Write to Flash\") self.CopyRAMToFlash(flash_address, ram_address, sector_size_bytes) sleep(self.kSleepTime) flash_crc = self.ReadCRC(flash_address, num_bytes=len(data)) assert flash_crc", 
"print(e, type(e)) raise def SyncConnection(self): synced = False self.ClearSerialConnection() self.Flush() for i in", "num_bytes return bytes(data) def PrepSectorsForWrite(self, start: int, end: int): try: response_code = self.WriteCommand(\"P", "# clear csum value csum = CalculateCheckSum(intvecs_list) intvecs_list[vector_table_loc] = csum vector_table_bytes = b''", "stop_bits: int = 1): ''' Baud Depends of FAIM config, stopbit is 1", "vecval) return vector_table_bytes def MakeBootable(vector_table_loc: int, orig_image: bytes) -> bytes: vector_table_bytes = GetCheckSumedVectorTable(vector_table_loc,", "data.append(ch) if len(data) != num_bytes: print(data, len(data), num_bytes) assert len(data) == num_bytes return", "int: for sector in range(self.SectorCount): if self.CheckSectorsBlank(sector, self.SectorCount - 1): return sector return", "address): return address >= self.RAMRange[0] and address <= self.RAMRange[1] def RamRangeLegal(self, address, length):", "if data_crc != ram_crc: print(\"CRC Check failed\", data_crc, ram_crc) else: break # Check", "== sector_size_bytes #data += bytes(sector_size_bytes - len(data)) data_crc = zlib.crc32(data, 0) try: ram_crc", "RAM\")#get confirmation # self.Write(data[i:i+kWordSize])#Stream data after confirmation # i+=kWordSize #when transfer is complete", "< end assert(self.FlashAddressLegal(start) and self.FlashAddressLegal(end)) response_code = self.WriteCommand(\"Z %d %d %d %d\"%(start, end,", "TimeoutError: pass if response_code not in (NXPReturnCodes[\"CMD_SUCCESS\"], NXPReturnCodes[\"SECTOR_NOT_BLANK\"]): RaiseReturnCodeError(response_code, \"Blank Check Sectors\") return", "!= NXPReturnCodes[\"CMD_SUCCESS\"]: raise UserWarning( \"Return Code Failure in {} {} {}\".format(call_name, GetErrorCodeName(code), code))", "on: command = \"A 1\" else: command = \"A 0\" response_code = self.WriteCommand(command)", "print(\"Program Length:\", len(prog)) sector_count = int(math.ceil(len(prog)/sector_bytes)) assert start_sector + 
sector_count <= self.SectorCount self.Unlock()", "wait_states: int = 2, mode: int = 0) -> str: assert start <", "self.ReadFlashSig(self.FlashRange[0], self.FlashRange[1]) print(\"Flash Signature: %s\"%chip_flash_sig) print(\"Programming Complete.\") def FindFirstBlankSector(self) -> int: for sector", "\"FAIM_NO_POWER\" : 0x1e, \"FAIM_NO_CLOCK\" : 0x1f, \"NoStatusResponse\" : 0xff, } def GetErrorCodeName(code: int)", "len(data): # self.Write(\"W %d %d\"%(start + i, kWordSize)) # self.AssertReturnCode(\"Write to RAM\")#get confirmation", "RAM To Flash\") #sleep(.2) def Go(self, address: int, thumb_mode: bool = False): '''", "self.EraseSector(sector, sector) sleep(self.kSleepTime) assert self.CheckSectorsBlank(sector, sector) sleep(self.kSleepTime) print(\"Prep Sector\") sector_blank = self.CheckSectorsBlank(sector, sector)", "RemoveBootableCheckSum(self.kCheckSumLocation, prog) image = MakeBootable(self.kCheckSumLocation, prog) print(\"Program Length:\", len(prog)) sector_count = int(math.ceil(len(prog)/sector_bytes)) assert", "print(\"Erase Sector\") self.EraseSector(sector, sector) sleep(self.kSleepTime) assert self.CheckSectorsBlank(sector, sector) sleep(self.kSleepTime) print(\"Prep Sector\") sector_blank =", "minor = self.ReadLine().strip() major = self.ReadLine().strip() return \"%d.%d\"%(int(major), int(minor)) ''' Checks to see", "= self.WriteCommand(\"S %d %d\"%(address, num_bytes)) RaiseReturnCodeError(response_code, \"Read CRC\") return int(self.ReadLine().strip()) def ReadFlashSig(self, start:", ": 0x13, \"Unused 1\" : 0x14, \"USER_CODE_CHECKSUM\" : 0x15, \"Unused 2\" : 0x16,", "len(ISPChip.kNewLine) + 1 #Parity = None #DataBits = 8 #StopBits = 1 SyncString", "= csum vector_table_bytes = b'' for vecval in intvecs_list: vector_table_bytes += struct.pack(\"<I\", vecval)", "end)) except Exception: response_code = self.WriteCommand(\"P %d %d\"%(start, end)) RaiseReturnCodeError(response_code, \"Prep Sectors\") def", "in {} {} {}\".format(call_name, 
GetErrorCodeName(code), code)) def RemoveBootableCheckSum(vector_table_loc: int, image: bytes): kuint32_t_size =", "< len(data): # self.Write(\"W %d %d\"%(start + i, kWordSize)) # self.AssertReturnCode(\"Write to RAM\")#get", "data_crc = zlib.crc32(data, 0) try: ram_crc = self.ReadCRC(ram_address, num_bytes=len(data)) except Exception: ram_crc =", "\"COUNT_ERROR\" : 0x6, \"INVALID_SECTOR/INVALID_PAGE\" : 0x7, \"SECTOR_NOT_BLANK\" : 0x8, \"SECTOR_NOT_PREPARED_FOR_WRITE_OPERATION\" : 0x9, \"COMPARE_ERROR\"", "self.ReadMemory(flash_address, len(data)) if len(data) != len(data_read): raise ValueError(\"Read Memory received incorrect amount of", "TimeoutError: pass if not verified: raise UserWarning(\"Verification Failure\") print(\"Syncronization Successful\") def ClearSerialConnection(self): self.Write(bytes(self.kNewLine,", ": 0x1f, \"NoStatusResponse\" : 0xff, } def GetErrorCodeName(code: int) -> str: code =", "import struct from timeout_decorator import timeout from timeout_decorator.timeout_decorator import TimeoutError from pycrc.algorithms import", "assert sector_bytes%self.kWordSize == 0 with open(image_file, 'rb') as f: prog = f.read() image", "0x9, \"COMPARE_ERROR\" : 0xa, \"BUSY\" : 0xb, \"PARAM_ERROR\" : 0xc, \"ADDR_ERROR\" : 0xd,", "response_code = self.WriteCommand(\"P %d %d\"%(start, end)) RaiseReturnCodeError(response_code, \"Prep Sectors\") def CopyRAMToFlash(self, flash_address: int,", "code)) def RemoveBootableCheckSum(vector_table_loc: int, image: bytes): kuint32_t_size = 4 MakeBootable(vector_table_loc, image) image_list =", "# calculate the checksum over the interrupt vectors intvecs_list = list(intvecs[:vector_table_size]) intvecs_list[vector_table_loc] =", "def WriteCommand(self, command_string: str) -> int: self.Write(bytes(command_string + self.kNewLine, encoding=\"utf-8\")) return self.GetReturnCode() def", "code == item[1]: return item[0] return \"Not Found\" def RaiseReturnCodeError(code: int, call_name: str)", "- start_sector + 1) * 
sector_bytes] self.WriteSector(sector, data_chunk) sleep(1) chip_flash_sig = self.ReadFlashSig(self.FlashRange[0], self.FlashRange[1])", "Sectors\") return response_code == NXPReturnCodes[\"CMD_SUCCESS\"] def ReadPartID(self): response_code = self.WriteCommand(\"J\") RaiseReturnCodeError(response_code, \"Read Part", "%d %d\"%(start, end)) try: self.ReadLine() response = self.ReadLine().strip() print(\"Check Sectors Blank response\", response)", "InitConnection(self): self.ResetSerialConnection() try: try: self.SyncConnection() self.SetCrystalFrequency(self.CrystalFrequency) except (UserWarning, TimeoutError) as w: print(\"Sync Failed\",", "w: print(\"Sync Failed\", w) print(\"Connect to running ISP\") self.ClearSerialConnection() self.Echo(False) try: self.ReadLine() self.Flush()", "+ orig_image[len(vector_table_bytes):] return image def FillDataToFitSector(data: bytes, size: int) -> bytes: if len(data)", "to RAM, skipping write\") return except: pass print(\"Prep Sector\") self.PrepSectorsForWrite(sector, sector) sleep(self.kSleepTime) print(\"Erase", "= self.SectorCount - 1 sleep(1) self.ClearBuffer() self.Unlock() self.PrepSectorsForWrite(0, last_sector) self.EraseSector(0, last_sector) print(\"Checking Sectors", "self.FlashRange[0] and address%self.kPageSizeBytes == 0 def RamAddressLegal(self, address): return address >= self.RAMRange[0] and", "vecval in intvecs_list: vector_table_bytes += struct.pack(\"<I\", vecval) return vector_table_bytes def MakeBootable(vector_table_loc: int, orig_image:", "\"Go\") def EraseSector(self, start: int, end: int): response_code = self.WriteCommand(\"E %d %d\"%(start, end))", "flash_crc = self.ReadCRC(flash_address, num_bytes=len(data)) assert flash_crc == data_crc assert self.MemoryLocationsEqual(flash_address, ram_address, sector_size_bytes) def", "len(data))) RaiseReturnCodeError(response_code, \"Write to RAM\") self.Write(data)#Stream data after confirmation #self.Write(\"OK\"+self.kNewLine) try: print(self.ReadLine()) 
except", "None: ''' Get a return code with no response ''' code = self.GetReturnCode()", "on: bool = True): ''' ISP echos host when enabled ''' if on:", "<= self.FlashRange[1]; def FlashRangeLegal(self, address, length): print(self.FlashRange, address, length) return self.FlashAddressLegal(address) and self.FlashAddressLegal(address", "- (csum % (1<<32)) def Crc32(frame) -> int: #CRC32 polynomial = 0x104c11db6 crc", "to see if sector is already equal to RAM, if so skip try:", "Depends of FAIM config, stopbit is 1 or 2 ''' response_code = self.WriteCommand(\"B", "self.ClearBuffer() self.Flush() self.Read() self.ClearBuffer() self.Flush() for _ in range(2): try: self.ReadLine() except TimeoutError:", "#print(out) assert(type(string) is bytes) self.WriteSerial(string) #self.WriteSerial(bytes(self.kNewLine, encoding = \"utf-8\")) ''' Takes the command", "''' Enables Flash Write, Erase, & Go ''' self.ClearBuffer() response_code = self.WriteCommand(\"U 23130\")", "print(\"Part UID: %s\"%uid) boot_code_version = self.ReadBootCodeVersion() print(\"Boot Code Version: %s\"%boot_code_version) self.SetBaudRate(self.baud_rate) print(\"Baudrate set", "num_bytes=len(data)) while ram_crc != data_crc: sleep(self.kSleepTime) self.WriteToRam(ram_address, data) sleep(self.kSleepTime) ram_crc = self.ReadCRC(ram_address, num_bytes=len(data))", "\"SRC_ADDR_NOT_MAPPED\" : 0x4, \"DST_ADDR_NOT_MAPPED\" : 0x5, \"COUNT_ERROR\" : 0x6, \"INVALID_SECTOR/INVALID_PAGE\" : 0x7, \"SECTOR_NOT_BLANK\"", "num_bytes=len(data)) assert flash_crc == data_crc assert self.MemoryLocationsEqual(flash_address, ram_address, sector_size_bytes) def WriteSector(self, sector: int,", "= self.ReadCRC(ram_address, num_bytes=len(data)) while ram_crc != data_crc: sleep(self.kSleepTime) self.WriteToRam(ram_address, data) sleep(self.kSleepTime) ram_crc =", "def GetErrorCodeName(code: int) -> str: code = int(code) for item in NXPReturnCodes.items(): if", "self.WriteToRam(ram_address, data) sleep(self.kSleepTime) ram_crc = 
self.ReadCRC(ram_address, num_bytes=len(data)) if data_crc != ram_crc: print(\"CRC Check", "+= struct.pack(\"<I\", vecval) return vector_table_bytes def MakeBootable(vector_table_loc: int, orig_image: bytes) -> bytes: vector_table_bytes", "self.ReadLine()#Should be OK\\r\\n if self.SyncVerified.strip() in frame_in: verified = True break except TimeoutError:", "= False self.ClearSerialConnection() self.Flush() for i in range(5): self.Write(bytes('?'*15, encoding=\"utf-8\")) #self.Write('?' + self.kNewLine)", "#assert data sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert(len(data) > 0) filled_data = FillDataToFitSector(data, sector_bytes) self.WriteFlashSector(sector,", "except ValueError: pass return self.ReturnCodes[\"NoStatusResponse\"] def AssertReturnCode(self, call_name: str) -> None: ''' Get", "response_code = self.WriteCommand(\"Z %d %d %d %d\"%(start, end, wait_states, mode)) RaiseReturnCodeError(response_code, \"Read Flash", "image[sector * sector_bytes : (sector + 1) * sector_bytes] self.WriteSector(sector, data_chunk) chip_flash_sig =", "be Minor then Major not like the docs say ''' response_code = self.WriteCommand(\"K\")", "int): self.Write(bytes((\"M %d %d %d\"%(address1, address2, num_bytes) + self.kNewLine), encoding=\"utf-8\")) response = self.ReadLine()", "0 for entry in frame: csum += entry return (1<<32) - (csum %", ": 0xa, \"BUSY\" : 0xb, \"PARAM_ERROR\" : 0xc, \"ADDR_ERROR\" : 0xd, \"ADDR_NOT_MAPPED\" :", "0 self.RAMRange = [0, 0] self.FlashRange = [0, 0] self.RAMStartWrite = 0 self.kCheckSumLocation", "sleep(self.kSleepTime) print(\"Erase Sector\") self.EraseSector(sector, sector) sleep(self.kSleepTime) assert self.CheckSectorsBlank(sector, sector) sleep(self.kSleepTime) print(\"Prep Sector\") sector_blank", "in (NXPReturnCodes[\"CMD_SUCCESS\"], NXPReturnCodes[\"SECTOR_NOT_BLANK\"]): RaiseReturnCodeError(response_code, \"Blank Check Sectors\") return response_code == NXPReturnCodes[\"CMD_SUCCESS\"] def ReadPartID(self):", "0 
#make not bootable self.Unlock() self.WriteSector(0, bytes([0xde]*sector_bytes)) with open(image_file, 'rb') as f: prog", "= 0 for entry in frame: csum += entry return (1<<32) - (csum", "mode)) RaiseReturnCodeError(response_code, \"Read Flash Signature\") sig = [] for i in range(4): sig.append(self.ReadLine().strip())", "the response code ''' def WriteCommand(self, command_string: str) -> int: self.Write(bytes(command_string + self.kNewLine,", "address2: int, num_bytes: int): self.Write(bytes((\"M %d %d %d\"%(address1, address2, num_bytes) + self.kNewLine), encoding=\"utf-8\"))", "\"INVALID_CODE\" : 0x10, \"INVALID_BAUD_RATE\" : 0x11, \"INVALID_STOP_BIT\" : 0x12, \"CODE_READ_PROTECTION_ENABLED\" : 0x13, \"Unused", "self.Write(data)#Stream data after confirmation #self.Write(\"OK\"+self.kNewLine) try: print(self.ReadLine()) except TimeoutError: return @timeout(4) def ReadMemory(self,", "end)) RaiseReturnCodeError(response_code, \"Erase Pages\") def CheckSectorsBlank(self, start: int, end: int) -> bool: assert", "try: self.ReadLine() except TimeoutError: pass def InitConnection(self): self.ResetSerialConnection() try: try: self.SyncConnection() self.SetCrystalFrequency(self.CrystalFrequency) except", "%s\"%(address, mode)) RaiseReturnCodeError(response_code, \"Go\") def EraseSector(self, start: int, end: int): response_code = self.WriteCommand(\"E", "Failure in {} {} {}\".format(call_name, GetErrorCodeName(code), code)) def RemoveBootableCheckSum(vector_table_loc: int, image: bytes): kuint32_t_size", "length) and length <= self.RAMRange[1] - self.RAMRange[0] and address%self.kWordSize == 0 def GetReturnCode(self)", "%d\"%(start, end)) RaiseReturnCodeError(response_code, \"Erase Pages\") def CheckSectorsBlank(self, start: int, end: int) -> bool:", "not in (NXPReturnCodes[\"CMD_SUCCESS\"], NXPReturnCodes[\"SECTOR_NOT_BLANK\"]): RaiseReturnCodeError(response_code, \"Blank Check Sectors\") return response_code == NXPReturnCodes[\"CMD_SUCCESS\"] def", "'wb') as f: for 
sector in range(blank_sector): print(\"Sector \", sector) f.write(self.ReadSector(sector)) def MassErase(self):", "Exception: response_code = self.WriteCommand(\"P %d %d\"%(start, end)) RaiseReturnCodeError(response_code, \"Prep Sectors\") def CopyRAMToFlash(self, flash_address:", "StatusRespLength = len(ISPChip.kNewLine) + 1 #Parity = None #DataBits = 8 #StopBits =", "Signature: %s\"%chip_flash_sig) print(\"Programming Complete.\") def FindFirstBlankSector(self) -> int: for sector in range(self.SectorCount): if", "data, flash_address: int) -> bool: ''' Read Memory and compare it to what", "-> int: csum = 0 for entry in frame: csum += entry return", "or 2 ''' response_code = self.WriteCommand(\"B {} {}\".format(baud_rate, stop_bits)) RaiseReturnCodeError(response_code, \"Set Baudrate\") def", "start < end assert(self.FlashAddressLegal(start) and self.FlashAddressLegal(end)) response_code = self.WriteCommand(\"Z %d %d %d %d\"%(start,", "1 or 2 ''' response_code = self.WriteCommand(\"B {} {}\".format(baud_rate, stop_bits)) RaiseReturnCodeError(response_code, \"Set Baudrate\")", "23130\") RaiseReturnCodeError(response_code, \"Unlock\") def SetBaudRate(self, baud_rate: int, stop_bits: int = 1): ''' Baud", "RaiseReturnCodeError(response_code, \"Read UID\") uuids = [ self.ReadLine().strip(), self.ReadLine().strip(), self.ReadLine().strip(), self.ReadLine().strip()] return \" \".join([\"0x%08x\"%int(uid)", "= self.WriteCommand(\"W %d %d\"%(start, len(data))) RaiseReturnCodeError(response_code, \"Write to RAM\") self.Write(data)#Stream data after confirmation", "= None #DataBits = 8 #StopBits = 1 SyncString = \"Synchronized\"+ISPChip.kNewLine SyncStringBytes =", "OK<CR><LF> response_code = self.WriteCommand(\"W %d %d\"%(start, len(data))) RaiseReturnCodeError(response_code, \"Write to RAM\") self.Write(data)#Stream data", "<= self.SectorCount self.Unlock() for sector in reversed(range(start_sector, start_sector + sector_count)): print(\"\\nWriting Sector %d\"%sector)", "== 
NXPReturnCodes[\"CMD_SUCCESS\"] def ReadPartID(self): response_code = self.WriteCommand(\"J\") RaiseReturnCodeError(response_code, \"Read Part ID\") resp =", "if so skip try: self.MemoryLocationsEqual(flash_address, ram_address, sector_size_bytes) print(\"Flash already equal to RAM, skipping", "sleep(self.kSleepTime) ram_crc = self.ReadCRC(ram_address, num_bytes=len(data)) if data_crc != ram_crc: print(\"CRC Check failed\", data_crc,", ": 0x12345678, \"CRP2\" : 0x87654321, \"CRP3\" : 0x43218765, } kSleepTime = 1 def", "1 #Parity = None #DataBits = 8 #StopBits = 1 SyncString = \"Synchronized\"+ISPChip.kNewLine", "= 0 # clear csum value csum = CalculateCheckSum(intvecs_list) intvecs_list[vector_table_loc] = csum vector_table_bytes", "prog = f.read() image = prog print(\"Program Length:\", len(prog)) sector_count = int(math.ceil(len(prog)/sector_bytes)) assert", "%d\"%(start, end)) RaiseReturnCodeError(response_code, \"Prep Sectors\") def CopyRAMToFlash(self, flash_address: int, ram_address: int, num_bytes: int):", "= self.WriteCommand(\"S %d %d\"%(address, num_bytes)) except TimeoutError: response_code = self.WriteCommand(\"S %d %d\"%(address, num_bytes))", ": 0x1b, \"REINVOKE_ISP_CONFIG\" : 0x1c, \"NO_VALID_IMAGE\" : 0x1d, \"FAIM_NO_POWER\" : 0x1e, \"FAIM_NO_CLOCK\" :", "= len(ISPChip.kNewLine) + 1 #Parity = None #DataBits = 8 #StopBits = 1", "try: print(self.ReadLine()) except TimeoutError: return @timeout(4) def ReadMemory(self, start: int, num_bytes: int): assert", "= self.ReadCRC(ram_address, num_bytes=len(data)) if data_crc != ram_crc: print(\"CRC Check failed\", data_crc, ram_crc) else:", "num_bytes) response_code = self.WriteCommand(\"C %d %d %d\"%(flash_address, ram_address, num_bytes)) RaiseReturnCodeError(response_code, \"Copy RAM To", "0) try: ram_crc = self.ReadCRC(ram_address, num_bytes=len(data)) except Exception: ram_crc = self.ReadCRC(ram_address, num_bytes=len(data)) while", "encoding=\"utf-8\")) self.ClearBuffer() self.Flush() self.Read() 
self.ClearBuffer() self.Flush() for _ in range(2): try: self.ReadLine() except", "\"Set Echo\") def WriteToRam(self, start: int, data: bytes): assert len(data)%self.kWordSize == 0 assert", "\"CRP1\" : 0x12345678, \"CRP2\" : 0x87654321, \"CRP3\" : 0x43218765, } kSleepTime = 1", "[0, 0] self.RAMStartWrite = 0 self.kCheckSumLocation = 7 #0x0000001c def FlashAddressLegal(self, address): return", "ram_address, sector_size_bytes) def WriteSector(self, sector: int, data: bytes): #assert data sector_bytes = self.SectorSizePages*self.kPageSizeBytes", "0x87654321, \"CRP3\" : 0x43218765, } kSleepTime = 1 def __init__(self, *args, **kwargs): super().__init__(*args,", "import ISPChip NXPReturnCodes = { \"CMD_SUCCESS\" : 0x0, \"INVALID_COMMAND\" : 0x1, \"SRC_ADDR_ERROR\" :", "''' Start executing code at the specified spot ''' mode = \"\" if", "= self.ReadLine().strip() return \"%d.%d\"%(int(major), int(minor)) ''' Checks to see if two sections in", "ram_address = self.RAMStartWrite sector_size_bytes = self.kPageSizeBytes*self.SectorSizePages flash_address = self.FlashRange[0] + sector*sector_size_bytes print(\"\\nWriting Sector:", "-> None: #print(out) assert(type(string) is bytes) self.WriteSerial(string) #self.WriteSerial(bytes(self.kNewLine, encoding = \"utf-8\")) ''' Takes", "int, stop_bits: int = 1): ''' Baud Depends of FAIM config, stopbit is", "self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize == 0 with open(image_file, 'rb') as f: prog = f.read()", "sector_blank = self.CheckSectorsBlank(sector, sector) assert sector_blank sleep(self.kSleepTime) self.PrepSectorsForWrite(sector, sector) sleep(self.kSleepTime) print(\"Write to Flash\")", "csum = CalculateCheckSum(intvecs_list) intvecs_list[vector_table_loc] = csum vector_table_bytes = b'' for vecval in intvecs_list:", "image = prog print(\"Program Length:\", len(prog)) sector_count = int(math.ceil(len(prog)/sector_bytes)) assert start_sector + sector_count", "RaiseReturnCodeError(response_code, 
\"Read CRC\") return int(self.ReadLine().strip()) def ReadFlashSig(self, start: int, end: int, wait_states: int", "= list(intvecs[:vector_table_size]) intvecs_list[vector_table_loc] = 0 # clear csum value csum = CalculateCheckSum(intvecs_list) intvecs_list[vector_table_loc]", "except (UserWarning, TimeoutError) as w: print(\"Sync Failed\", w) print(\"Connect to running ISP\") self.ClearSerialConnection()", "Flash Write, Erase, & Go ''' self.ClearBuffer() response_code = self.WriteCommand(\"U 23130\") RaiseReturnCodeError(response_code, \"Unlock\")", "orig_image: bytes) -> bytes: # make this a valid image by inserting a", "!= num_bytes: print(data, len(data), num_bytes) assert len(data) == num_bytes return bytes(data) def PrepSectorsForWrite(self,", "(NXPReturnCodes[\"CMD_SUCCESS\"], NXPReturnCodes[\"SECTOR_NOT_BLANK\"]): RaiseReturnCodeError(response_code, \"Blank Check Sectors\") return response_code == NXPReturnCodes[\"CMD_SUCCESS\"] def ReadPartID(self): response_code", "int: self.Write(bytes(command_string + self.kNewLine, encoding=\"utf-8\")) return self.GetReturnCode() def Unlock(self): ''' Enables Flash Write,", "to what was written ''' data_read = self.ReadMemory(flash_address, len(data)) if len(data) != len(data_read):", "def ReadSector(self, sector: int) -> bytes: sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize == 0", "RaiseReturnCodeError(response_code, \"Read Memory\") while len(self.data_buffer_in) < (num_bytes): self.Read() # Command success is sent", "data: bytes): #assert data sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert(len(data) > 0) filled_data = FillDataToFitSector(data,", "except TimeoutError: pass def InitConnection(self): self.ResetSerialConnection() try: try: self.SyncConnection() self.SetCrystalFrequency(self.CrystalFrequency) except (UserWarning, TimeoutError)", "= self.WriteCommand(\"P %d %d\"%(start, end)) except Exception: response_code = self.WriteCommand(\"P %d %d\"%(start, 
end))", "(1<<32) - (csum % (1<<32)) def Crc32(frame) -> int: #CRC32 polynomial = 0x104c11db6", "0xf, \"INVALID_CODE\" : 0x10, \"INVALID_BAUD_RATE\" : 0x11, \"INVALID_STOP_BIT\" : 0x12, \"CODE_READ_PROTECTION_ENABLED\" : 0x13,", "Get a return code with no response ''' code = self.GetReturnCode() RaiseReturnCodeError(code, call_name)", "self.SyncVerified.strip() in frame_in: verified = True break except TimeoutError: pass if not verified:", "running ISP\") self.ClearSerialConnection() self.Echo(False) try: self.ReadLine() self.Flush() self.ClearBuffer() except TimeoutError: pass uid =", "+ sector_count <= self.SectorCount self.Unlock() for sector in reversed(range(start_sector, start_sector + sector_count)): print(\"\\nWriting", "pass if not verified: raise UserWarning(\"Verification Failure\") print(\"Syncronization Successful\") def ClearSerialConnection(self): self.Write(bytes(self.kNewLine, encoding=\"utf-8\"))", "- 1): return sector return self.SectorCount - 1 def ReadSector(self, sector: int) ->", "self.Flush() for i in range(5): self.Write(bytes('?'*15, encoding=\"utf-8\")) #self.Write('?' 
+ self.kNewLine) try: frame_in =", "assert self.RamRangeLegal(start, len(data)) print(\"Write to RAM %d bytes\"%len(data)) #while i < len(data): #", "the transferr data = [] while self.data_buffer_in: ch = self.data_buffer_in.popleft() data.append(ch) if len(data)", "+ sector*sector_size_bytes print(\"\\nWriting Sector: %d\\nFlash Address: %x\\nRAM Address: %x\\n\"%(sector, flash_address, ram_address)) assert len(data)", "30MHz self.SectorCount = 0 self.RAMSize = 0 self.RAMRange = [0, 0] self.FlashRange =", "0) filled_data = FillDataToFitSector(data, sector_bytes) self.WriteFlashSector(sector, filled_data) sleep(self.kSleepTime) #assert self.ReadSector(sector) == data_chunk def", "%d %d\"%(address, num_bytes)) except TimeoutError: response_code = self.WriteCommand(\"S %d %d\"%(address, num_bytes)) RaiseReturnCodeError(response_code, \"Read", "ram_crc = self.ReadCRC(ram_address, num_bytes=len(data)) if data_crc != ram_crc: print(\"CRC Check failed\", data_crc, ram_crc)", "filled_data) sleep(self.kSleepTime) #assert self.ReadSector(sector) == data_chunk def WriteBinaryToFlash(self, image_file: str, start_sector: int): sector_bytes", "CalculateCheckSum(frame) -> int: csum = 0 for entry in frame: csum += entry", "0x12345678, \"CRP2\" : 0x87654321, \"CRP3\" : 0x43218765, } kSleepTime = 1 def __init__(self,", "0x000002fc CRCValues = { \"NO_ISP\": 0x4e697370, \"CRP1\" : 0x12345678, \"CRP2\" : 0x87654321, \"CRP3\"", "0x1d, \"FAIM_NO_POWER\" : 0x1e, \"FAIM_NO_CLOCK\" : 0x1f, \"NoStatusResponse\" : 0xff, } def GetErrorCodeName(code:", "ram_address, sector_size_bytes) print(\"Flash already equal to RAM, skipping write\") return except: pass print(\"Prep", "+ self.kNewLine, encoding=\"utf-8\"))) verified = False for i in range(3): try: frame_in =", "incorrect amount of data\") if isinstance(type(data), data_read): raise TypeError(\"data written and data read", "raise UserWarning(\"Verification Failure\") print(\"Syncronization Successful\") def ClearSerialConnection(self): 
self.Write(bytes(self.kNewLine, encoding=\"utf-8\")) self.ClearBuffer() self.Flush() self.Read() self.ClearBuffer()", "%s\"%boot_code_version) self.SetBaudRate(self.baud_rate) print(\"Baudrate set to %d\"%self.baud_rate) except Exception as e: print(e, type(e)) raise", "\"ADDR_NOT_MAPPED\" : 0xe, \"CMD_LOCKED\" : 0xf, \"INVALID_CODE\" : 0x10, \"INVALID_BAUD_RATE\" : 0x11, \"INVALID_STOP_BIT\"", ": 0x3, \"SRC_ADDR_NOT_MAPPED\" : 0x4, \"DST_ADDR_NOT_MAPPED\" : 0x5, \"COUNT_ERROR\" : 0x6, \"INVALID_SECTOR/INVALID_PAGE\" :", "bytes: vector_table_bytes = GetCheckSumedVectorTable(vector_table_loc, orig_image) image = vector_table_bytes + orig_image[len(vector_table_bytes):] return image def", "self.SetCrystalFrequency(self.CrystalFrequency) except (UserWarning, TimeoutError) as w: print(\"Sync Failed\", w) print(\"Connect to running ISP\")", "= self.WriteCommand(\"P %d %d\"%(start, end)) RaiseReturnCodeError(response_code, \"Prep Sectors\") def CopyRAMToFlash(self, flash_address: int, ram_address:", "0] self.RAMStartWrite = 0 self.kCheckSumLocation = 7 #0x0000001c def FlashAddressLegal(self, address): return address", "executing code at the specified spot ''' mode = \"\" if thumb_mode: mode", "\"%d.%d\"%(int(major), int(minor)) ''' Checks to see if two sections in the memory map", "into list of little endian 32 bit words intvecs = struct.unpack(\"<%dI\"%vector_table_size, orig_image[:vector_table_size *", "crc = Crc(width=32, poly=polynomial, reflect_in=True, xor_in=(1<<32)-1, reflect_out=True, xor_out=0x00) crc_calc = crc.bit_by_bit(frame) return crc_calc", "address): return address >= self.FlashRange[0] and address <= self.FlashRange[1]; def FlashRangeLegal(self, address, length):", "= self.WriteCommand(\"J\") RaiseReturnCodeError(response_code, \"Read Part ID\") resp = self.ReadLine() return int(resp) def ReadBootCodeVersion(self):", "i < len(data): # self.Write(\"W %d %d\"%(start + i, kWordSize)) # self.AssertReturnCode(\"Write to", "WriteImage(self, image_file: 
str): sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize == 0 #make not bootable", "assert start < end assert(self.FlashAddressLegal(start) and self.FlashAddressLegal(end)) response_code = self.WriteCommand(\"Z %d %d %d", "for _ in range(10): #sleep(.1) try: resp = self.ReadLine().strip() return int(resp) except ValueError:", "!= size: data += bytes([0xff] *(size - len(data))) return data class NXPChip(ISPChip): kWordSize", "%d\"%(address, num_bytes)) RaiseReturnCodeError(response_code, \"Read CRC\") return int(self.ReadLine().strip()) def ReadFlashSig(self, start: int, end: int,", "== data_chunk def WriteBinaryToFlash(self, image_file: str, start_sector: int): sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize", "sector_count <= self.SectorCount for sector in reversed(range(sector_count)): print(\"\\nWriting Sector %d\"%sector) data_chunk = image[sector", "code = int(code) for item in NXPReturnCodes.items(): if code == item[1]: return item[0]", "0] self.FlashRange = [0, 0] self.RAMStartWrite = 0 self.kCheckSumLocation = 7 #0x0000001c def", "return self.FlashAddressLegal(address) and self.FlashAddressLegal(address + length - 1) and length <= self.FlashRange[1] -", "RaiseReturnCodeError(response_code, \"Erase Pages\") def CheckSectorsBlank(self, start: int, end: int) -> bool: assert start", "math import zlib from time import sleep import struct from timeout_decorator import timeout", "False): ''' Start executing code at the specified spot ''' mode = \"\"", "to see if two sections in the memory map are equal ''' def", "encoding = \"utf-8\")) ''' Takes the command string, return the response code '''", ": 0x15, \"Unused 2\" : 0x16, \"EFRO_NO_POWER\" : 0x17, \"FLASH_NO_POWER\" : 0x18, \"Unused", "sector_size_bytes) print(\"Flash already equal to RAM, skipping write\") return except: pass print(\"Prep Sector\")", "Failure\") print(\"Syncronization Successful\") def 
ClearSerialConnection(self): self.Write(bytes(self.kNewLine, encoding=\"utf-8\")) self.ClearBuffer() self.Flush() self.Read() self.ClearBuffer() self.Flush() for", "seems to be Minor then Major not like the docs say ''' response_code", "ValueError: pass return self.ReturnCodes[\"NoStatusResponse\"] def AssertReturnCode(self, call_name: str) -> None: ''' Get a", "def ReadMemory(self, start: int, num_bytes: int): assert num_bytes%self.kWordSize == 0 assert self.RamRangeLegal(start, num_bytes)", "first for some reason. Also the boot version seems to be Minor then", "self.SectorCount - 1 def ReadSector(self, sector: int) -> bytes: sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert", "1 sleep(1) self.ClearBuffer() self.Unlock() self.PrepSectorsForWrite(0, last_sector) self.EraseSector(0, last_sector) print(\"Checking Sectors are blank\") assert", "Read Memory and compare it to what was written ''' data_read = self.ReadMemory(flash_address,", "self.Flush() for _ in range(2): try: self.ReadLine() except TimeoutError: pass def SetCrystalFrequency(self, frequency_khz:", "image_list[vector_table_loc * kuint32_t_size + byte] = 0 return bytes(image_list) # 2s compliment of", "import math import zlib from time import sleep import struct from timeout_decorator import", "while ram_crc != data_crc: sleep(self.kSleepTime) self.WriteToRam(ram_address, data) sleep(self.kSleepTime) ram_crc = self.ReadCRC(ram_address, num_bytes=len(data)) if", "self.ReadCRC(flash_address, num_bytes=len(data)) assert flash_crc == data_crc assert self.MemoryLocationsEqual(flash_address, ram_address, sector_size_bytes) def WriteSector(self, sector:", "\"FLASH_NO_CLOCK\" : 0x1b, \"REINVOKE_ISP_CONFIG\" : 0x1c, \"NO_VALID_IMAGE\" : 0x1d, \"FAIM_NO_POWER\" : 0x1e, \"FAIM_NO_CLOCK\"", "= \"Synchronized\"+ISPChip.kNewLine SyncStringBytes = bytes(SyncString, encoding=\"utf-8\") SyncVerified = bytes(\"OK\"+ISPChip.kNewLine, encoding=\"utf-8\") ReturnCodes = NXPReturnCodes", "place vector_table_size = 
8 kuint32_t_size = 4 # Make byte array into list", "intvecs_list: vector_table_bytes += struct.pack(\"<I\", vecval) return vector_table_bytes def MakeBootable(vector_table_loc: int, orig_image: bytes) ->", "\"Read CRC\") return int(self.ReadLine().strip()) def ReadFlashSig(self, start: int, end: int, wait_states: int =", "2s compliment of checksum def CalculateCheckSum(frame) -> int: csum = 0 for entry", "-> bool: assert start <= end response_code = self.WriteCommand(\"I %d %d\"%(start, end)) try:", "@timeout(4) def ReadMemory(self, start: int, num_bytes: int): assert num_bytes%self.kWordSize == 0 assert self.RamRangeLegal(start,", "version seems to be Minor then Major not like the docs say '''", "for SyncString raise UserWarning(\"Syncronization Failure\") #self.Flush() self.Write(self.SyncStringBytes)#echo SyncString try: frame_in = self.ReadLine()#discard echo", "= 16 MaxByteTransfer = 1024 StatusRespLength = len(ISPChip.kNewLine) + 1 #Parity = None", "= self.CheckSectorsBlank(sector, sector) assert sector_blank sleep(self.kSleepTime) self.PrepSectorsForWrite(sector, sector) sleep(self.kSleepTime) print(\"Write to Flash\") self.CopyRAMToFlash(flash_address,", "WriteFlashSector(self, sector: int, data: bytes): ram_address = self.RAMStartWrite sector_size_bytes = self.kPageSizeBytes*self.SectorSizePages flash_address =", "address >= self.RAMRange[0] and address <= self.RAMRange[1] def RamRangeLegal(self, address, length): return self.RamAddressLegal(address)", "try: try: self.SyncConnection() self.SetCrystalFrequency(self.CrystalFrequency) except (UserWarning, TimeoutError) as w: print(\"Sync Failed\", w) print(\"Connect", "= f.read() #image = RemoveBootableCheckSum(self.kCheckSumLocation, prog) image = MakeBootable(self.kCheckSumLocation, prog) print(\"Program Length:\", len(prog))", "type(e)) raise def SyncConnection(self): synced = False self.ClearSerialConnection() self.Flush() for i in range(5):", "4\" : 0x1a, \"FLASH_NO_CLOCK\" : 0x1b, 
\"REINVOKE_ISP_CONFIG\" : 0x1c, \"NO_VALID_IMAGE\" : 0x1d, \"FAIM_NO_POWER\"", "flash_address = self.FlashRange[0] + sector*sector_size_bytes print(\"\\nWriting Sector: %d\\nFlash Address: %x\\nRAM Address: %x\\n\"%(sector, flash_address,", "0x19, \"Unused 4\" : 0x1a, \"FLASH_NO_CLOCK\" : 0x1b, \"REINVOKE_ISP_CONFIG\" : 0x1c, \"NO_VALID_IMAGE\" :", "import sleep import struct from timeout_decorator import timeout from timeout_decorator.timeout_decorator import TimeoutError from", "uuids = [ self.ReadLine().strip(), self.ReadLine().strip(), self.ReadLine().strip(), self.ReadLine().strip()] return \" \".join([\"0x%08x\"%int(uid) for uid in", "def WriteSector(self, sector: int, data: bytes): #assert data sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert(len(data) >", "0x4e697370, \"CRP1\" : 0x12345678, \"CRP2\" : 0x87654321, \"CRP3\" : 0x43218765, } kSleepTime =", "self.WriteCommand(command) RaiseReturnCodeError(response_code, \"Set Echo\") def WriteToRam(self, start: int, data: bytes): assert len(data)%self.kWordSize ==", "str: assert start < end assert(self.FlashAddressLegal(start) and self.FlashAddressLegal(end)) response_code = self.WriteCommand(\"Z %d %d", "Echo(self, on: bool = True): ''' ISP echos host when enabled ''' if", "import timeout from timeout_decorator.timeout_decorator import TimeoutError from pycrc.algorithms import Crc from .ISPChip import", "\"Unused 1\" : 0x14, \"USER_CODE_CHECKSUM\" : 0x15, \"Unused 2\" : 0x16, \"EFRO_NO_POWER\" :", "vector_table_bytes = GetCheckSumedVectorTable(vector_table_loc, orig_image) image = vector_table_bytes + orig_image[len(vector_table_bytes):] return image def FillDataToFitSector(data:", "kuint32_t_size = 4 MakeBootable(vector_table_loc, image) image_list = list(image) for byte in range(kuint32_t_size): image_list[vector_table_loc", "break except TimeoutError: pass if not synced: #Check for SyncString raise UserWarning(\"Syncronization Failure\")", "self.ReadSector(sector) == data_chunk def 
WriteBinaryToFlash(self, image_file: str, start_sector: int): sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert", "self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize == 0 return self.ReadMemory(sector*sector_bytes, sector_bytes) def ReadImage(self, image_file: str): blank_sector", "frame_in = self.ReadLine() if self.SyncString.strip() in frame_in.strip(): synced = True break except TimeoutError:", "int) -> bytes: sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize == 0 return self.ReadMemory(sector*sector_bytes, sector_bytes)", "address: int, thumb_mode: bool = False): ''' Start executing code at the specified", "+= bytes([0xff] *(size - len(data))) return data class NXPChip(ISPChip): kWordSize = 4 kPageSizeBytes", "%d %d\"%(start, end, wait_states, mode)) RaiseReturnCodeError(response_code, \"Read Flash Signature\") sig = [] for", "def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.CrystalFrequency = 12000#khz == 30MHz self.SectorCount =", "start: int, end: int, wait_states: int = 2, mode: int = 0) ->", "major = self.ReadLine().strip() return \"%d.%d\"%(int(major), int(minor)) ''' Checks to see if two sections", "def ResetSerialConnection(self): self.Flush() self.Write(bytes(self.kNewLine, encoding=\"utf-8\")) try: self.ReadLine() except TimeoutError: pass def InitConnection(self): self.ResetSerialConnection()", "[] for i in range(4): sig.append(self.ReadLine().strip()) return sig def ReadWriteFAIM(self): response_code = self.WriteCommand(\"O\")", "except TimeoutError: pass uid = self.ReadUID() print(\"Part UID: %s\"%uid) boot_code_version = self.ReadBootCodeVersion() print(\"Boot", "\"Unused 3\" : 0x19, \"Unused 4\" : 0x1a, \"FLASH_NO_CLOCK\" : 0x1b, \"REINVOKE_ISP_CONFIG\" :", "= self.GetReturnCode() RaiseReturnCodeError(code, call_name) def Write(self, string : bytes) -> None: #print(out) assert(type(string)", "response_code = self.WriteCommand(command) 
RaiseReturnCodeError(response_code, \"Set Echo\") def WriteToRam(self, start: int, data: bytes): assert", "def SetCrystalFrequency(self, frequency_khz: int): self.Write((bytes(\"%d\"%frequency_khz + self.kNewLine, encoding=\"utf-8\"))) verified = False for i", "assert len(data) == sector_size_bytes #data += bytes(sector_size_bytes - len(data)) data_crc = zlib.crc32(data, 0)", "success is sent at the end of the transferr data = [] while", "frame_in = self.ReadLine()#discard echo except TimeoutError: pass verified = False for i in", "response = self.ReadLine() response_code = int(response[0]) if response_code not in (NXPReturnCodes[\"CMD_SUCCESS\"], NXPReturnCodes[\"COMPARE_ERROR\"]): RaiseReturnCodeError(response_code,", "in range(blank_sector): print(\"Sector \", sector) f.write(self.ReadSector(sector)) def MassErase(self): last_sector = self.SectorCount - 1", "(num_bytes): self.Read() # Command success is sent at the end of the transferr", "0xd, \"ADDR_NOT_MAPPED\" : 0xe, \"CMD_LOCKED\" : 0xf, \"INVALID_CODE\" : 0x10, \"INVALID_BAUD_RATE\" : 0x11,", "with no response ''' code = self.GetReturnCode() RaiseReturnCodeError(code, call_name) def Write(self, string :", "self.PrepSectorsForWrite(sector, sector) sleep(self.kSleepTime) print(\"Write to Flash\") self.CopyRAMToFlash(flash_address, ram_address, sector_size_bytes) sleep(self.kSleepTime) flash_crc = self.ReadCRC(flash_address,", "CopyRAMToFlash(self, flash_address: int, ram_address: int, num_bytes: int): assert self.RamRangeLegal(ram_address, num_bytes) assert self.FlashRangeLegal(flash_address, num_bytes)", "= False): ''' Start executing code at the specified spot ''' mode =", "= MakeBootable(self.kCheckSumLocation, prog) print(\"Program Length:\", len(prog)) sector_count = int(math.ceil(len(prog)/sector_bytes)) assert sector_count <= self.SectorCount", "ReadWriteFAIM(self): response_code = self.WriteCommand(\"O\") RaiseReturnCodeError(response_code, \"Read Write FAIM\") def ResetSerialConnection(self): 
self.Flush() self.Write(bytes(self.kNewLine, encoding=\"utf-8\"))", "in (NXPReturnCodes[\"CMD_SUCCESS\"], NXPReturnCodes[\"COMPARE_ERROR\"]): RaiseReturnCodeError(response_code, \"Compare\") return response_code == NXPReturnCodes[\"CMD_SUCCESS\"] def ReadUID(self): response_code =", "Erase, & Go ''' self.ClearBuffer() response_code = self.WriteCommand(\"U 23130\") RaiseReturnCodeError(response_code, \"Unlock\") def SetBaudRate(self,", "self.ReadLine() response = self.ReadLine().strip() print(\"Check Sectors Blank response\", response) except TimeoutError: pass if", "sector in reversed(range(start_sector, start_sector + sector_count)): print(\"\\nWriting Sector %d\"%sector) data_chunk = image[(sector-start_sector) *", "def SetBaudRate(self, baud_rate: int, stop_bits: int = 1): ''' Baud Depends of FAIM", "None #DataBits = 8 #StopBits = 1 SyncString = \"Synchronized\"+ISPChip.kNewLine SyncStringBytes = bytes(SyncString,", "last_sector = self.SectorCount - 1 sleep(1) self.ClearBuffer() self.Unlock() self.PrepSectorsForWrite(0, last_sector) self.EraseSector(0, last_sector) print(\"Checking", "{ \"CMD_SUCCESS\" : 0x0, \"INVALID_COMMAND\" : 0x1, \"SRC_ADDR_ERROR\" : 0x2, \"DST_ADDR_ERROR\" : 0x3,", "try: self.ReadLine() response = self.ReadLine().strip() print(\"Check Sectors Blank response\", response) except TimeoutError: pass", "def ReadPartID(self): response_code = self.WriteCommand(\"J\") RaiseReturnCodeError(response_code, \"Read Part ID\") resp = self.ReadLine() return", "def RemoveBootableCheckSum(vector_table_loc: int, image: bytes): kuint32_t_size = 4 MakeBootable(vector_table_loc, image) image_list = list(image)", "handler sends OK<CR><LF> response_code = self.WriteCommand(\"W %d %d\"%(start, len(data))) RaiseReturnCodeError(response_code, \"Write to RAM\")", ": 0x6, \"INVALID_SECTOR/INVALID_PAGE\" : 0x7, \"SECTOR_NOT_BLANK\" : 0x8, \"SECTOR_NOT_PREPARED_FOR_WRITE_OPERATION\" : 0x9, \"COMPARE_ERROR\" :", "in NXPReturnCodes.items(): if code == item[1]: return 
item[0] return \"Not Found\" def RaiseReturnCodeError(code:", "resp = self.ReadLine() return int(resp) def ReadBootCodeVersion(self): ''' LPC84x sends a 0x1a first", "0x8, \"SECTOR_NOT_PREPARED_FOR_WRITE_OPERATION\" : 0x9, \"COMPARE_ERROR\" : 0xa, \"BUSY\" : 0xb, \"PARAM_ERROR\" : 0xc,", "#self.Write(\"OK\"+self.kNewLine) try: print(self.ReadLine()) except TimeoutError: return @timeout(4) def ReadMemory(self, start: int, num_bytes: int):", "int = 0) -> str: assert start < end assert(self.FlashAddressLegal(start) and self.FlashAddressLegal(end)) response_code", "def FindFirstBlankSector(self) -> int: for sector in range(self.SectorCount): if self.CheckSectorsBlank(sector, self.SectorCount - 1):", "self.ReadLine() except TimeoutError: pass def SetCrystalFrequency(self, frequency_khz: int): self.Write((bytes(\"%d\"%frequency_khz + self.kNewLine, encoding=\"utf-8\"))) verified", "try: frame_in = self.ReadLine()#Should be OK\\r\\n if self.SyncVerified.strip() in frame_in: verified = True", "\"Unused 2\" : 0x16, \"EFRO_NO_POWER\" : 0x17, \"FLASH_NO_POWER\" : 0x18, \"Unused 3\" :", "\"Read Part ID\") resp = self.ReadLine() return int(resp) def ReadBootCodeVersion(self): ''' LPC84x sends", "return int(resp) def ReadBootCodeVersion(self): ''' LPC84x sends a 0x1a first for some reason.", "orig_image) image = vector_table_bytes + orig_image[len(vector_table_bytes):] return image def FillDataToFitSector(data: bytes, size: int)", "%d\"%(start, num_bytes)) RaiseReturnCodeError(response_code, \"Read Memory\") while len(self.data_buffer_in) < (num_bytes): self.Read() # Command success", "byte in range(kuint32_t_size): image_list[vector_table_loc * kuint32_t_size + byte] = 0 return bytes(image_list) #", "pass uid = self.ReadUID() print(\"Part UID: %s\"%uid) boot_code_version = self.ReadBootCodeVersion() print(\"Boot Code Version:", "0xb, \"PARAM_ERROR\" : 0xc, \"ADDR_ERROR\" : 0xd, \"ADDR_NOT_MAPPED\" : 0xe, \"CMD_LOCKED\" : 0xf,", "flash_crc == data_crc assert 
self.MemoryLocationsEqual(flash_address, ram_address, sector_size_bytes) def WriteSector(self, sector: int, data: bytes):", "sector_size_bytes) sleep(self.kSleepTime) flash_crc = self.ReadCRC(flash_address, num_bytes=len(data)) assert flash_crc == data_crc assert self.MemoryLocationsEqual(flash_address, ram_address,", "= 'T' response_code = self.WriteCommand(\"G %d %s\"%(address, mode)) RaiseReturnCodeError(response_code, \"Go\") def EraseSector(self, start:", "wait_states, mode)) RaiseReturnCodeError(response_code, \"Read Flash Signature\") sig = [] for i in range(4):", "int) -> bytes: if len(data) != size: data += bytes([0xff] *(size - len(data)))", "0 self.kCheckSumLocation = 7 #0x0000001c def FlashAddressLegal(self, address): return address >= self.FlashRange[0] and", "ReadSector(self, sector: int) -> bytes: sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize == 0 return", "%s\"%chip_flash_sig) print(\"Programming Complete.\") def FindFirstBlankSector(self) -> int: for sector in range(self.SectorCount): if self.CheckSectorsBlank(sector,", "\" \".join([\"0x%08x\"%int(uid) for uid in uuids]) def ReadCRC(self, address: int, num_bytes: int) ->", "MemoryLocationsEqual(self, address1: int, address2: int, num_bytes: int): self.Write(bytes((\"M %d %d %d\"%(address1, address2, num_bytes)", "synced = True break except TimeoutError: pass if not synced: #Check for SyncString", "0 # clear csum value csum = CalculateCheckSum(intvecs_list) intvecs_list[vector_table_loc] = csum vector_table_bytes =", "the specified spot ''' mode = \"\" if thumb_mode: mode = 'T' response_code", "sections in the memory map are equal ''' def MemoryLocationsEqual(self, address1: int, address2:", "break except TimeoutError: pass if not verified: raise UserWarning(\"Verification Failure\") print(\"Syncronization Successful\") def", "num_bytes) assert len(data) == num_bytes return bytes(data) def PrepSectorsForWrite(self, start: int, end: int):", "* 
sector_bytes] self.WriteSector(sector, data_chunk) chip_flash_sig = self.ReadFlashSig(self.FlashRange[0], self.FlashRange[1]) print(\"Flash Signature: %s\"%chip_flash_sig) print(\"Programming Complete.\")", "assert start_sector + sector_count <= self.SectorCount self.Unlock() for sector in reversed(range(start_sector, start_sector +", "a return code with no response ''' code = self.GetReturnCode() RaiseReturnCodeError(code, call_name) def", "%d %d\"%(start, end)) RaiseReturnCodeError(response_code, \"Erase Pages\") def CheckSectorsBlank(self, start: int, end: int) ->", "SyncString raise UserWarning(\"Syncronization Failure\") #self.Flush() self.Write(self.SyncStringBytes)#echo SyncString try: frame_in = self.ReadLine()#discard echo except", "polynomial = 0x104c11db6 crc = Crc(width=32, poly=polynomial, reflect_in=True, xor_in=(1<<32)-1, reflect_out=True, xor_out=0x00) crc_calc =", "Length:\", len(prog)) sector_count = int(math.ceil(len(prog)/sector_bytes)) assert sector_count <= self.SectorCount for sector in reversed(range(sector_count)):", "checksum in the correct place vector_table_size = 8 kuint32_t_size = 4 # Make", "def WriteToRam(self, start: int, data: bytes): assert len(data)%self.kWordSize == 0 assert self.RamRangeLegal(start, len(data))", "4 # Make byte array into list of little endian 32 bit words", "response\", response) except TimeoutError: pass if response_code not in (NXPReturnCodes[\"CMD_SUCCESS\"], NXPReturnCodes[\"SECTOR_NOT_BLANK\"]): RaiseReturnCodeError(response_code, \"Blank", "flash_address: int, ram_address: int, num_bytes: int): assert self.RamRangeLegal(ram_address, num_bytes) assert self.FlashRangeLegal(flash_address, num_bytes) response_code", "response) except TimeoutError: pass if response_code not in (NXPReturnCodes[\"CMD_SUCCESS\"], NXPReturnCodes[\"SECTOR_NOT_BLANK\"]): RaiseReturnCodeError(response_code, \"Blank Check", "self.RAMRange[0] and address%self.kWordSize == 0 def GetReturnCode(self) -> int: for _ in range(10):", 
"print(\"Sector \", sector) f.write(self.ReadSector(sector)) def MassErase(self): last_sector = self.SectorCount - 1 sleep(1) self.ClearBuffer()", "= self.WriteCommand(\"U 23130\") RaiseReturnCodeError(response_code, \"Unlock\") def SetBaudRate(self, baud_rate: int, stop_bits: int = 1):", "RaiseReturnCodeError(response_code, \"Set Baudrate\") def Echo(self, on: bool = True): ''' ISP echos host", "image_file: str, start_sector: int): sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize == 0 with open(image_file,", "int): sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize == 0 with open(image_file, 'rb') as f:", "self.FlashRangeLegal(flash_address, num_bytes) response_code = self.WriteCommand(\"C %d %d %d\"%(flash_address, ram_address, num_bytes)) RaiseReturnCodeError(response_code, \"Copy RAM", "in range(10): #sleep(.1) try: resp = self.ReadLine().strip() return int(resp) except ValueError: pass return", "<= self.RAMRange[1] def RamRangeLegal(self, address, length): return self.RamAddressLegal(address) and self.RamAddressLegal(address + length) and", "64 SectorSizePages = 16 MaxByteTransfer = 1024 StatusRespLength = len(ISPChip.kNewLine) + 1 #Parity", "endian 32 bit words intvecs = struct.unpack(\"<%dI\"%vector_table_size, orig_image[:vector_table_size * kuint32_t_size]) # calculate the", "response_code = self.WriteCommand(\"N\") RaiseReturnCodeError(response_code, \"Read UID\") uuids = [ self.ReadLine().strip(), self.ReadLine().strip(), self.ReadLine().strip(), self.ReadLine().strip()]", "int, num_bytes: int): assert num_bytes%self.kWordSize == 0 assert self.RamRangeLegal(start, num_bytes) print(\"ReadMemory\") #self.Flush() #self.Read()", "super().__init__(*args, **kwargs) self.CrystalFrequency = 12000#khz == 30MHz self.SectorCount = 0 self.RAMSize = 0", "response_code = self.WriteCommand(\"X %d %d\"%(start, end)) RaiseReturnCodeError(response_code, \"Erase Pages\") def 
CheckSectorsBlank(self, start: int,", "def MemoryLocationsEqual(self, address1: int, address2: int, num_bytes: int): self.Write(bytes((\"M %d %d %d\"%(address1, address2,", "kSleepTime = 1 def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.CrystalFrequency = 12000#khz ==", ": 0x5, \"COUNT_ERROR\" : 0x6, \"INVALID_SECTOR/INVALID_PAGE\" : 0x7, \"SECTOR_NOT_BLANK\" : 0x8, \"SECTOR_NOT_PREPARED_FOR_WRITE_OPERATION\" :", "0\" response_code = self.WriteCommand(command) RaiseReturnCodeError(response_code, \"Set Echo\") def WriteToRam(self, start: int, data: bytes):", "== 0 def GetReturnCode(self) -> int: for _ in range(10): #sleep(.1) try: resp", "def RaiseReturnCodeError(code: int, call_name: str) -> None: if int(code) != NXPReturnCodes[\"CMD_SUCCESS\"]: raise UserWarning(", "self.PrepSectorsForWrite(sector, sector) sleep(self.kSleepTime) print(\"Erase Sector\") self.EraseSector(sector, sector) sleep(self.kSleepTime) assert self.CheckSectorsBlank(sector, sector) sleep(self.kSleepTime) print(\"Prep", "self.WriteCommand(\"K\") RaiseReturnCodeError(response_code, \"Read Bootcode Version\") minor = self.ReadLine().strip() major = self.ReadLine().strip() return \"%d.%d\"%(int(major),", "pass def SetCrystalFrequency(self, frequency_khz: int): self.Write((bytes(\"%d\"%frequency_khz + self.kNewLine, encoding=\"utf-8\"))) verified = False for", "assert sector_bytes%self.kWordSize == 0 #make not bootable self.Unlock() self.WriteSector(0, bytes([0xde]*sector_bytes)) with open(image_file, 'rb')", "return self.GetReturnCode() def Unlock(self): ''' Enables Flash Write, Erase, & Go ''' self.ClearBuffer()", "'rb') as f: prog = f.read() #image = RemoveBootableCheckSum(self.kCheckSumLocation, prog) image = MakeBootable(self.kCheckSumLocation,", "def FillDataToFitSector(data: bytes, size: int) -> bytes: if len(data) != size: data +=", "resp = self.ReadLine().strip() return int(resp) except ValueError: pass return self.ReturnCodes[\"NoStatusResponse\"] def 
AssertReturnCode(self, call_name:", "address <= self.RAMRange[1] def RamRangeLegal(self, address, length): return self.RamAddressLegal(address) and self.RamAddressLegal(address + length)", "to %d\"%self.baud_rate) except Exception as e: print(e, type(e)) raise def SyncConnection(self): synced =", "Also the boot version seems to be Minor then Major not like the", "= \"\" if thumb_mode: mode = 'T' response_code = self.WriteCommand(\"G %d %s\"%(address, mode))", "ID\") resp = self.ReadLine() return int(resp) def ReadBootCodeVersion(self): ''' LPC84x sends a 0x1a", "-> bytes: sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize == 0 return self.ReadMemory(sector*sector_bytes, sector_bytes) def", "*(size - len(data))) return data class NXPChip(ISPChip): kWordSize = 4 kPageSizeBytes = 64", "RaiseReturnCodeError(response_code, \"Blank Check Sectors\") return response_code == NXPReturnCodes[\"CMD_SUCCESS\"] def ReadPartID(self): response_code = self.WriteCommand(\"J\")", "# self.Write(data[i:i+kWordSize])#Stream data after confirmation # i+=kWordSize #when transfer is complete the handler", "RaiseReturnCodeError(response_code, \"Compare\") return response_code == NXPReturnCodes[\"CMD_SUCCESS\"] def ReadUID(self): response_code = self.WriteCommand(\"N\") RaiseReturnCodeError(response_code, \"Read", "checksum def CalculateCheckSum(frame) -> int: csum = 0 for entry in frame: csum", "# self.AssertReturnCode(\"Write to RAM\")#get confirmation # self.Write(data[i:i+kWordSize])#Stream data after confirmation # i+=kWordSize #when", "intvecs_list[vector_table_loc] = csum vector_table_bytes = b'' for vecval in intvecs_list: vector_table_bytes += struct.pack(\"<I\",", "RaiseReturnCodeError(response_code, \"Read Write FAIM\") def ResetSerialConnection(self): self.Flush() self.Write(bytes(self.kNewLine, encoding=\"utf-8\")) try: self.ReadLine() except TimeoutError:", "csum = 0 for entry in frame: csum += entry return (1<<32) -", "\"FLASH_NO_POWER\" 
: 0x18, \"Unused 3\" : 0x19, \"Unused 4\" : 0x1a, \"FLASH_NO_CLOCK\" :", "-> bool: ''' Read Memory and compare it to what was written '''", "CRC\") return int(self.ReadLine().strip()) def ReadFlashSig(self, start: int, end: int, wait_states: int = 2,", "self.RamAddressLegal(address) and self.RamAddressLegal(address + length) and length <= self.RAMRange[1] - self.RAMRange[0] and address%self.kWordSize", "ram_crc = self.ReadCRC(ram_address, num_bytes=len(data)) while ram_crc != data_crc: sleep(self.kSleepTime) self.WriteToRam(ram_address, data) sleep(self.kSleepTime) ram_crc", "%d %d\"%(address1, address2, num_bytes) + self.kNewLine), encoding=\"utf-8\")) response = self.ReadLine() response_code = int(response[0])", "for vecval in intvecs_list: vector_table_bytes += struct.pack(\"<I\", vecval) return vector_table_bytes def MakeBootable(vector_table_loc: int,", "for _ in range(2): try: self.ReadLine() except TimeoutError: pass def SetCrystalFrequency(self, frequency_khz: int):", "Sector\") self.EraseSector(sector, sector) sleep(self.kSleepTime) assert self.CheckSectorsBlank(sector, sector) sleep(self.kSleepTime) print(\"Prep Sector\") sector_blank = self.CheckSectorsBlank(sector,", "[ self.ReadLine().strip(), self.ReadLine().strip(), self.ReadLine().strip(), self.ReadLine().strip()] return \" \".join([\"0x%08x\"%int(uid) for uid in uuids]) def", "sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert(len(data) > 0) filled_data = FillDataToFitSector(data, sector_bytes) self.WriteFlashSector(sector, filled_data) sleep(self.kSleepTime)", "= [0, 0] self.FlashRange = [0, 0] self.RAMStartWrite = 0 self.kCheckSumLocation = 7", "!= len(data_read): raise ValueError(\"Read Memory received incorrect amount of data\") if isinstance(type(data), data_read):", "calculate the checksum over the interrupt vectors intvecs_list = list(intvecs[:vector_table_size]) intvecs_list[vector_table_loc] = 0", "% (1<<32)) def Crc32(frame) -> int: #CRC32 polynomial = 0x104c11db6 crc = 
Crc(width=32,", "Version\") minor = self.ReadLine().strip() major = self.ReadLine().strip() return \"%d.%d\"%(int(major), int(minor)) ''' Checks to", "_ in range(10): #sleep(.1) try: resp = self.ReadLine().strip() return int(resp) except ValueError: pass", "if on: command = \"A 1\" else: command = \"A 0\" response_code =", "RamAddressLegal(self, address): return address >= self.RAMRange[0] and address <= self.RAMRange[1] def RamRangeLegal(self, address,", "assert len(data)%self.kWordSize == 0 assert self.RamRangeLegal(start, len(data)) print(\"Write to RAM %d bytes\"%len(data)) #while", "+ byte] = 0 return bytes(image_list) # 2s compliment of checksum def CalculateCheckSum(frame)", "sector_bytes] self.WriteSector(sector, data_chunk) chip_flash_sig = self.ReadFlashSig(self.FlashRange[0], self.FlashRange[1]) print(\"Flash Signature: %s\"%chip_flash_sig) print(\"Programming Complete.\") def", "-> int: for _ in range(10): #sleep(.1) try: resp = self.ReadLine().strip() return int(resp)", "num_bytes: int): assert self.RamRangeLegal(ram_address, num_bytes) assert self.FlashRangeLegal(flash_address, num_bytes) response_code = self.WriteCommand(\"C %d %d", "UserWarning(\"Verification Failure\") print(\"Syncronization Successful\") def ClearSerialConnection(self): self.Write(bytes(self.kNewLine, encoding=\"utf-8\")) self.ClearBuffer() self.Flush() self.Read() self.ClearBuffer() self.Flush()", "int): assert self.RamRangeLegal(ram_address, num_bytes) assert self.FlashRangeLegal(flash_address, num_bytes) response_code = self.WriteCommand(\"C %d %d %d\"%(flash_address,", "and self.RamAddressLegal(address + length) and length <= self.RAMRange[1] - self.RAMRange[0] and address%self.kWordSize ==", "spot ''' mode = \"\" if thumb_mode: mode = 'T' response_code = self.WriteCommand(\"G", "assert flash_crc == data_crc assert self.MemoryLocationsEqual(flash_address, ram_address, sector_size_bytes) def WriteSector(self, sector: int, data:", "response = self.ReadLine().strip() 
print(\"Check Sectors Blank response\", response) except TimeoutError: pass if response_code", "= 64 SectorSizePages = 16 MaxByteTransfer = 1024 StatusRespLength = len(ISPChip.kNewLine) + 1", "sleep(self.kSleepTime) self.PrepSectorsForWrite(sector, sector) sleep(self.kSleepTime) print(\"Write to Flash\") self.CopyRAMToFlash(flash_address, ram_address, sector_size_bytes) sleep(self.kSleepTime) flash_crc =", "address: int, num_bytes: int) -> int: try: response_code = self.WriteCommand(\"S %d %d\"%(address, num_bytes))", "num_bytes) print(\"ReadMemory\") #self.Flush() #self.Read() #self.ClearBuffer() #self.Flush() print(\"R %d %d\"%(start, num_bytes)) response_code = self.WriteCommand(\"R", "SyncConnection(self): synced = False self.ClearSerialConnection() self.Flush() for i in range(5): self.Write(bytes('?'*15, encoding=\"utf-8\")) #self.Write('?'", "for sector in range(blank_sector): print(\"Sector \", sector) f.write(self.ReadSector(sector)) def MassErase(self): last_sector = self.SectorCount", "intvecs_list[vector_table_loc] = 0 # clear csum value csum = CalculateCheckSum(intvecs_list) intvecs_list[vector_table_loc] = csum", "transfer is complete the handler sends OK<CR><LF> response_code = self.WriteCommand(\"W %d %d\"%(start, len(data)))", "data_chunk) sleep(1) chip_flash_sig = self.ReadFlashSig(self.FlashRange[0], self.FlashRange[1]) print(\"Flash Signature: %s\"%chip_flash_sig) print(\"Programming Complete.\") def WriteImage(self,", "''' response_code = self.WriteCommand(\"B {} {}\".format(baud_rate, stop_bits)) RaiseReturnCodeError(response_code, \"Set Baudrate\") def Echo(self, on:", "self.WriteCommand(\"E %d %d\"%(start, end)) RaiseReturnCodeError(response_code, \"Erase Sectors\") def ErasePages(self, start: int, end: int):", "to RAM %d bytes\"%len(data)) #while i < len(data): # self.Write(\"W %d %d\"%(start +", "reversed(range(start_sector, start_sector + sector_count)): print(\"\\nWriting Sector %d\"%sector) data_chunk = image[(sector-start_sector) * 
sector_bytes :", "return data == data_read def WriteFlashSector(self, sector: int, data: bytes): ram_address = self.RAMStartWrite", "for byte in range(kuint32_t_size): image_list[vector_table_loc * kuint32_t_size + byte] = 0 return bytes(image_list)", "struct.unpack(\"<%dI\"%vector_table_size, orig_image[:vector_table_size * kuint32_t_size]) # calculate the checksum over the interrupt vectors intvecs_list", "RaiseReturnCodeError(response_code, \"Set Echo\") def WriteToRam(self, start: int, data: bytes): assert len(data)%self.kWordSize == 0", "of little endian 32 bit words intvecs = struct.unpack(\"<%dI\"%vector_table_size, orig_image[:vector_table_size * kuint32_t_size]) #", "= [0, 0] self.RAMStartWrite = 0 self.kCheckSumLocation = 7 #0x0000001c def FlashAddressLegal(self, address):", "Write, Erase, & Go ''' self.ClearBuffer() response_code = self.WriteCommand(\"U 23130\") RaiseReturnCodeError(response_code, \"Unlock\") def", "xor_out=0x00) crc_calc = crc.bit_by_bit(frame) return crc_calc def GetCheckSumedVectorTable(vector_table_loc: int, orig_image: bytes) -> bytes:", "= list(image) for byte in range(kuint32_t_size): image_list[vector_table_loc * kuint32_t_size + byte] = 0", "Successful\") def ClearSerialConnection(self): self.Write(bytes(self.kNewLine, encoding=\"utf-8\")) self.ClearBuffer() self.Flush() self.Read() self.ClearBuffer() self.Flush() for _ in", "boot version seems to be Minor then Major not like the docs say", "return vector_table_bytes def MakeBootable(vector_table_loc: int, orig_image: bytes) -> bytes: vector_table_bytes = GetCheckSumedVectorTable(vector_table_loc, orig_image)", "\"utf-8\")) ''' Takes the command string, return the response code ''' def WriteCommand(self,", "struct.pack(\"<I\", vecval) return vector_table_bytes def MakeBootable(vector_table_loc: int, orig_image: bytes) -> bytes: vector_table_bytes =", "break except TimeoutError: pass if not verified: raise UserWarning(\"Verification Failure\") def CheckFlashWrite(self, data,", 
"self.WriteCommand(\"N\") RaiseReturnCodeError(response_code, \"Read UID\") uuids = [ self.ReadLine().strip(), self.ReadLine().strip(), self.ReadLine().strip(), self.ReadLine().strip()] return \"", "pass print(\"Prep Sector\") self.PrepSectorsForWrite(sector, sector) sleep(self.kSleepTime) print(\"Erase Sector\") self.EraseSector(sector, sector) sleep(self.kSleepTime) assert self.CheckSectorsBlank(sector,", "in reversed(range(start_sector, start_sector + sector_count)): print(\"\\nWriting Sector %d\"%sector) data_chunk = image[(sector-start_sector) * sector_bytes", "RamRangeLegal(self, address, length): return self.RamAddressLegal(address) and self.RamAddressLegal(address + length) and length <= self.RAMRange[1]", "timeout from timeout_decorator.timeout_decorator import TimeoutError from pycrc.algorithms import Crc from .ISPChip import ISPChip", "in range(2): try: self.ReadLine() except TimeoutError: pass def SetCrystalFrequency(self, frequency_khz: int): self.Write((bytes(\"%d\"%frequency_khz +", "except: pass print(\"Prep Sector\") self.PrepSectorsForWrite(sector, sector) sleep(self.kSleepTime) print(\"Erase Sector\") self.EraseSector(sector, sector) sleep(self.kSleepTime) assert", "def FlashAddressLegal(self, address): return address >= self.FlashRange[0] and address <= self.FlashRange[1]; def FlashRangeLegal(self,", "int(self.ReadLine().strip()) def ReadFlashSig(self, start: int, end: int, wait_states: int = 2, mode: int", "response_code == NXPReturnCodes[\"CMD_SUCCESS\"] def ReadUID(self): response_code = self.WriteCommand(\"N\") RaiseReturnCodeError(response_code, \"Read UID\") uuids =", "written ''' data_read = self.ReadMemory(flash_address, len(data)) if len(data) != len(data_read): raise ValueError(\"Read Memory", "def InitConnection(self): self.ResetSerialConnection() try: try: self.SyncConnection() self.SetCrystalFrequency(self.CrystalFrequency) except (UserWarning, TimeoutError) as w: print(\"Sync", "self.kCheckSumLocation = 7 #0x0000001c def 
FlashAddressLegal(self, address): return address >= self.FlashRange[0] and address", "!= data_crc: sleep(self.kSleepTime) self.WriteToRam(ram_address, data) sleep(self.kSleepTime) ram_crc = self.ReadCRC(ram_address, num_bytes=len(data)) if data_crc !=", "if not verified: raise UserWarning(\"Verification Failure\") def CheckFlashWrite(self, data, flash_address: int) -> bool:", "what was written ''' data_read = self.ReadMemory(flash_address, len(data)) if len(data) != len(data_read): raise", "ValueError(\"Read Memory received incorrect amount of data\") if isinstance(type(data), data_read): raise TypeError(\"data written", "int(resp) def ReadBootCodeVersion(self): ''' LPC84x sends a 0x1a first for some reason. Also", "print(\"Program Length:\", len(prog)) sector_count = int(math.ceil(len(prog)/sector_bytes)) assert sector_count <= self.SectorCount for sector in", "written and data read are of different types\") return data == data_read def", "''' Takes the command string, return the response code ''' def WriteCommand(self, command_string:", "<= end response_code = self.WriteCommand(\"I %d %d\"%(start, end)) try: self.ReadLine() response = self.ReadLine().strip()", "Blank response\", response) except TimeoutError: pass if response_code not in (NXPReturnCodes[\"CMD_SUCCESS\"], NXPReturnCodes[\"SECTOR_NOT_BLANK\"]): RaiseReturnCodeError(response_code,", "i in range(4): sig.append(self.ReadLine().strip()) return sig def ReadWriteFAIM(self): response_code = self.WriteCommand(\"O\") RaiseReturnCodeError(response_code, \"Read", "self.ResetSerialConnection() try: try: self.SyncConnection() self.SetCrystalFrequency(self.CrystalFrequency) except (UserWarning, TimeoutError) as w: print(\"Sync Failed\", w)", "in the correct place vector_table_size = 8 kuint32_t_size = 4 # Make byte", "for sector in reversed(range(sector_count)): print(\"\\nWriting Sector %d\"%sector) data_chunk = image[sector * sector_bytes :", "''' ISP echos host when enabled ''' if on: command = \"A 1\"", 
"range(self.SectorCount): if self.CheckSectorsBlank(sector, self.SectorCount - 1): return sector return self.SectorCount - 1 def", "Check Sectors\") return response_code == NXPReturnCodes[\"CMD_SUCCESS\"] def ReadPartID(self): response_code = self.WriteCommand(\"J\") RaiseReturnCodeError(response_code, \"Read", "int, data: bytes): assert len(data)%self.kWordSize == 0 assert self.RamRangeLegal(start, len(data)) print(\"Write to RAM", "self.Flush() self.ClearBuffer() except TimeoutError: pass uid = self.ReadUID() print(\"Part UID: %s\"%uid) boot_code_version =", "TimeoutError: pass if not verified: raise UserWarning(\"Verification Failure\") def CheckFlashWrite(self, data, flash_address: int)", "self.WriteCommand(\"G %d %s\"%(address, mode)) RaiseReturnCodeError(response_code, \"Go\") def EraseSector(self, start: int, end: int): response_code", "self.WriteCommand(\"B {} {}\".format(baud_rate, stop_bits)) RaiseReturnCodeError(response_code, \"Set Baudrate\") def Echo(self, on: bool = True):", "num_bytes=len(data)) except Exception: ram_crc = self.ReadCRC(ram_address, num_bytes=len(data)) while ram_crc != data_crc: sleep(self.kSleepTime) self.WriteToRam(ram_address,", "baud_rate: int, stop_bits: int = 1): ''' Baud Depends of FAIM config, stopbit", "response_code = self.WriteCommand(\"J\") RaiseReturnCodeError(response_code, \"Read Part ID\") resp = self.ReadLine() return int(resp) def", "def ReadWriteFAIM(self): response_code = self.WriteCommand(\"O\") RaiseReturnCodeError(response_code, \"Read Write FAIM\") def ResetSerialConnection(self): self.Flush() self.Write(bytes(self.kNewLine,", "uid = self.ReadUID() print(\"Part UID: %s\"%uid) boot_code_version = self.ReadBootCodeVersion() print(\"Boot Code Version: %s\"%boot_code_version)", "data_chunk = image[(sector-start_sector) * sector_bytes : (sector - start_sector + 1) * sector_bytes]", "print(\"Boot Code Version: %s\"%boot_code_version) self.SetBaudRate(self.baud_rate) print(\"Baudrate set to %d\"%self.baud_rate) 
except Exception as e:", "the correct place vector_table_size = 8 kuint32_t_size = 4 # Make byte array", "list(intvecs[:vector_table_size]) intvecs_list[vector_table_loc] = 0 # clear csum value csum = CalculateCheckSum(intvecs_list) intvecs_list[vector_table_loc] =", ": 0xe, \"CMD_LOCKED\" : 0xf, \"INVALID_CODE\" : 0x10, \"INVALID_BAUD_RATE\" : 0x11, \"INVALID_STOP_BIT\" :", "1 def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.CrystalFrequency = 12000#khz == 30MHz self.SectorCount", "self.ReturnCodes[\"NoStatusResponse\"] def AssertReturnCode(self, call_name: str) -> None: ''' Get a return code with", "self.ReadLine()#discard echo except TimeoutError: pass verified = False for i in range(3): try:", "if sector is already equal to RAM, if so skip try: self.MemoryLocationsEqual(flash_address, ram_address,", "data_crc, ram_crc) else: break # Check to see if sector is already equal", "ReadBootCodeVersion(self): ''' LPC84x sends a 0x1a first for some reason. Also the boot", "return bytes(image_list) # 2s compliment of checksum def CalculateCheckSum(frame) -> int: csum =", "sleep(1) self.ClearBuffer() self.Unlock() self.PrepSectorsForWrite(0, last_sector) self.EraseSector(0, last_sector) print(\"Checking Sectors are blank\") assert self.CheckSectorsBlank(0,", "encoding=\"utf-8\")) #self.Write('?' 
+ self.kNewLine) try: frame_in = self.ReadLine() if self.SyncString.strip() in frame_in.strip(): synced", "\"Read Bootcode Version\") minor = self.ReadLine().strip() major = self.ReadLine().strip() return \"%d.%d\"%(int(major), int(minor)) '''", "(1<<32)) def Crc32(frame) -> int: #CRC32 polynomial = 0x104c11db6 crc = Crc(width=32, poly=polynomial,", "encoding=\"utf-8\") ReturnCodes = NXPReturnCodes CRCLocation = 0x000002fc CRCValues = { \"NO_ISP\": 0x4e697370, \"CRP1\"", "= self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize == 0 return self.ReadMemory(sector*sector_bytes, sector_bytes) def ReadImage(self, image_file: str):", "time import sleep import struct from timeout_decorator import timeout from timeout_decorator.timeout_decorator import TimeoutError", "in range(self.SectorCount): if self.CheckSectorsBlank(sector, self.SectorCount - 1): return sector return self.SectorCount - 1", "return self.ReadMemory(sector*sector_bytes, sector_bytes) def ReadImage(self, image_file: str): blank_sector = self.FindFirstBlankSector() with open(image_file, 'wb')", "= 4 # Make byte array into list of little endian 32 bit", "code with no response ''' code = self.GetReturnCode() RaiseReturnCodeError(code, call_name) def Write(self, string", "num_bytes%self.kWordSize == 0 assert self.RamRangeLegal(start, num_bytes) print(\"ReadMemory\") #self.Flush() #self.Read() #self.ClearBuffer() #self.Flush() print(\"R %d", "0x1, \"SRC_ADDR_ERROR\" : 0x2, \"DST_ADDR_ERROR\" : 0x3, \"SRC_ADDR_NOT_MAPPED\" : 0x4, \"DST_ADDR_NOT_MAPPED\" : 0x5,", "TimeoutError) as w: print(\"Sync Failed\", w) print(\"Connect to running ISP\") self.ClearSerialConnection() self.Echo(False) try:", "= self.WriteCommand(command) RaiseReturnCodeError(response_code, \"Set Echo\") def WriteToRam(self, start: int, data: bytes): assert len(data)%self.kWordSize", "%d\"%(start + i, kWordSize)) # self.AssertReturnCode(\"Write to RAM\")#get confirmation # self.Write(data[i:i+kWordSize])#Stream data 
after", ": 0x1a, \"FLASH_NO_CLOCK\" : 0x1b, \"REINVOKE_ISP_CONFIG\" : 0x1c, \"NO_VALID_IMAGE\" : 0x1d, \"FAIM_NO_POWER\" :", "self.MemoryLocationsEqual(flash_address, ram_address, sector_size_bytes) def WriteSector(self, sector: int, data: bytes): #assert data sector_bytes =", "= self.WriteCommand(\"R %d %d\"%(start, num_bytes)) RaiseReturnCodeError(response_code, \"Read Memory\") while len(self.data_buffer_in) < (num_bytes): self.Read()", "#make not bootable self.Unlock() self.WriteSector(0, bytes([0xde]*sector_bytes)) with open(image_file, 'rb') as f: prog =", "int, wait_states: int = 2, mode: int = 0) -> str: assert start", "== 0 assert self.RamRangeLegal(start, num_bytes) print(\"ReadMemory\") #self.Flush() #self.Read() #self.ClearBuffer() #self.Flush() print(\"R %d %d\"%(start,", "f.write(self.ReadSector(sector)) def MassErase(self): last_sector = self.SectorCount - 1 sleep(1) self.ClearBuffer() self.Unlock() self.PrepSectorsForWrite(0, last_sector)", "start: int, end: int): response_code = self.WriteCommand(\"E %d %d\"%(start, end)) RaiseReturnCodeError(response_code, \"Erase Sectors\")", "start: int, end: int): response_code = self.WriteCommand(\"X %d %d\"%(start, end)) RaiseReturnCodeError(response_code, \"Erase Pages\")", "ErasePages(self, start: int, end: int): response_code = self.WriteCommand(\"X %d %d\"%(start, end)) RaiseReturnCodeError(response_code, \"Erase", "range(10): #sleep(.1) try: resp = self.ReadLine().strip() return int(resp) except ValueError: pass return self.ReturnCodes[\"NoStatusResponse\"]", "self.FlashRange[0] and address <= self.FlashRange[1]; def FlashRangeLegal(self, address, length): print(self.FlashRange, address, length) return", "by inserting a checksum in the correct place vector_table_size = 8 kuint32_t_size =", "response_code = self.WriteCommand(\"S %d %d\"%(address, num_bytes)) RaiseReturnCodeError(response_code, \"Read CRC\") return int(self.ReadLine().strip()) def ReadFlashSig(self,", 
"NXPReturnCodes[\"SECTOR_NOT_BLANK\"]): RaiseReturnCodeError(response_code, \"Blank Check Sectors\") return response_code == NXPReturnCodes[\"CMD_SUCCESS\"] def ReadPartID(self): response_code =", "def ReadCRC(self, address: int, num_bytes: int) -> int: try: response_code = self.WriteCommand(\"S %d", "Address: %x\\nRAM Address: %x\\n\"%(sector, flash_address, ram_address)) assert len(data) == sector_size_bytes #data += bytes(sector_size_bytes", "NXPReturnCodes[\"CMD_SUCCESS\"] def ReadPartID(self): response_code = self.WriteCommand(\"J\") RaiseReturnCodeError(response_code, \"Read Part ID\") resp = self.ReadLine()", "= Crc(width=32, poly=polynomial, reflect_in=True, xor_in=(1<<32)-1, reflect_out=True, xor_out=0x00) crc_calc = crc.bit_by_bit(frame) return crc_calc def", "sector) f.write(self.ReadSector(sector)) def MassErase(self): last_sector = self.SectorCount - 1 sleep(1) self.ClearBuffer() self.Unlock() self.PrepSectorsForWrite(0,", "bytes(sector_size_bytes - len(data)) data_crc = zlib.crc32(data, 0) try: ram_crc = self.ReadCRC(ram_address, num_bytes=len(data)) except", "range(2): try: self.ReadLine() except TimeoutError: pass def SetCrystalFrequency(self, frequency_khz: int): self.Write((bytes(\"%d\"%frequency_khz + self.kNewLine,", "length): print(self.FlashRange, address, length) return self.FlashAddressLegal(address) and self.FlashAddressLegal(address + length - 1) and", "RaiseReturnCodeError(response_code, \"Copy RAM To Flash\") #sleep(.2) def Go(self, address: int, thumb_mode: bool =", "return response_code == NXPReturnCodes[\"CMD_SUCCESS\"] def ReadPartID(self): response_code = self.WriteCommand(\"J\") RaiseReturnCodeError(response_code, \"Read Part ID\")", "%d\"%(address, num_bytes)) except TimeoutError: response_code = self.WriteCommand(\"S %d %d\"%(address, num_bytes)) RaiseReturnCodeError(response_code, \"Read CRC\")", "return item[0] return \"Not Found\" def RaiseReturnCodeError(code: int, call_name: str) -> None: if", "self.Read() self.ClearBuffer() 
self.Flush() for _ in range(2): try: self.ReadLine() except TimeoutError: pass def", "Part ID\") resp = self.ReadLine() return int(resp) def ReadBootCodeVersion(self): ''' LPC84x sends a", "ram_crc = self.ReadCRC(ram_address, num_bytes=len(data)) except Exception: ram_crc = self.ReadCRC(ram_address, num_bytes=len(data)) while ram_crc !=", "#sleep(.1) try: resp = self.ReadLine().strip() return int(resp) except ValueError: pass return self.ReturnCodes[\"NoStatusResponse\"] def", "len(data) == sector_size_bytes #data += bytes(sector_size_bytes - len(data)) data_crc = zlib.crc32(data, 0) try:", "confirmation #self.Write(\"OK\"+self.kNewLine) try: print(self.ReadLine()) except TimeoutError: return @timeout(4) def ReadMemory(self, start: int, num_bytes:", "csum vector_table_bytes = b'' for vecval in intvecs_list: vector_table_bytes += struct.pack(\"<I\", vecval) return", "WriteBinaryToFlash(self, image_file: str, start_sector: int): sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize == 0 with", "self.RAMRange = [0, 0] self.FlashRange = [0, 0] self.RAMStartWrite = 0 self.kCheckSumLocation =", "Version: %s\"%boot_code_version) self.SetBaudRate(self.baud_rate) print(\"Baudrate set to %d\"%self.baud_rate) except Exception as e: print(e, type(e))", "\".join([\"0x%08x\"%int(uid) for uid in uuids]) def ReadCRC(self, address: int, num_bytes: int) -> int:", "= \"A 0\" response_code = self.WriteCommand(command) RaiseReturnCodeError(response_code, \"Set Echo\") def WriteToRam(self, start: int,", "sector_size_bytes = self.kPageSizeBytes*self.SectorSizePages flash_address = self.FlashRange[0] + sector*sector_size_bytes print(\"\\nWriting Sector: %d\\nFlash Address: %x\\nRAM", "self.CheckSectorsBlank(sector, self.SectorCount - 1): return sector return self.SectorCount - 1 def ReadSector(self, sector:", "WriteSector(self, sector: int, data: bytes): #assert data sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert(len(data) > 0)", ": 
0x87654321, \"CRP3\" : 0x43218765, } kSleepTime = 1 def __init__(self, *args, **kwargs):", "== 0 with open(image_file, 'rb') as f: prog = f.read() image = prog", "sector) sleep(self.kSleepTime) print(\"Prep Sector\") sector_blank = self.CheckSectorsBlank(sector, sector) assert sector_blank sleep(self.kSleepTime) self.PrepSectorsForWrite(sector, sector)", "= 1 SyncString = \"Synchronized\"+ISPChip.kNewLine SyncStringBytes = bytes(SyncString, encoding=\"utf-8\") SyncVerified = bytes(\"OK\"+ISPChip.kNewLine, encoding=\"utf-8\")", "self.ReadLine() return int(resp) def ReadBootCodeVersion(self): ''' LPC84x sends a 0x1a first for some", "stopbit is 1 or 2 ''' response_code = self.WriteCommand(\"B {} {}\".format(baud_rate, stop_bits)) RaiseReturnCodeError(response_code,", "self.kNewLine, encoding=\"utf-8\"))) verified = False for i in range(3): try: frame_in = self.ReadLine()#Should", "RAM, skipping write\") return except: pass print(\"Prep Sector\") self.PrepSectorsForWrite(sector, sector) sleep(self.kSleepTime) print(\"Erase Sector\")", "self.ReadLine().strip(), self.ReadLine().strip(), self.ReadLine().strip()] return \" \".join([\"0x%08x\"%int(uid) for uid in uuids]) def ReadCRC(self, address:", "data_chunk) chip_flash_sig = self.ReadFlashSig(self.FlashRange[0], self.FlashRange[1]) print(\"Flash Signature: %s\"%chip_flash_sig) print(\"Programming Complete.\") def FindFirstBlankSector(self) ->", "= True): ''' ISP echos host when enabled ''' if on: command =", "= int(math.ceil(len(prog)/sector_bytes)) assert start_sector + sector_count <= self.SectorCount self.Unlock() for sector in reversed(range(start_sector,", "3\" : 0x19, \"Unused 4\" : 0x1a, \"FLASH_NO_CLOCK\" : 0x1b, \"REINVOKE_ISP_CONFIG\" : 0x1c,", ": 0xb, \"PARAM_ERROR\" : 0xc, \"ADDR_ERROR\" : 0xd, \"ADDR_NOT_MAPPED\" : 0xe, \"CMD_LOCKED\" :", "print(\"Syncronization Successful\") def ClearSerialConnection(self): self.Write(bytes(self.kNewLine, encoding=\"utf-8\")) self.ClearBuffer() self.Flush() self.Read() 
self.ClearBuffer() self.Flush() for _", "+ 1) * sector_bytes] self.WriteSector(sector, data_chunk) sleep(1) chip_flash_sig = self.ReadFlashSig(self.FlashRange[0], self.FlashRange[1]) print(\"Flash Signature:", "GetErrorCodeName(code: int) -> str: code = int(code) for item in NXPReturnCodes.items(): if code", "len(data) != len(data_read): raise ValueError(\"Read Memory received incorrect amount of data\") if isinstance(type(data),", "Sector\") self.PrepSectorsForWrite(sector, sector) sleep(self.kSleepTime) print(\"Erase Sector\") self.EraseSector(sector, sector) sleep(self.kSleepTime) assert self.CheckSectorsBlank(sector, sector) sleep(self.kSleepTime)", "sleep import struct from timeout_decorator import timeout from timeout_decorator.timeout_decorator import TimeoutError from pycrc.algorithms", "Major not like the docs say ''' response_code = self.WriteCommand(\"K\") RaiseReturnCodeError(response_code, \"Read Bootcode", "in frame: csum += entry return (1<<32) - (csum % (1<<32)) def Crc32(frame)", ": 0x19, \"Unused 4\" : 0x1a, \"FLASH_NO_CLOCK\" : 0x1b, \"REINVOKE_ISP_CONFIG\" : 0x1c, \"NO_VALID_IMAGE\"", "= 0 self.kCheckSumLocation = 7 #0x0000001c def FlashAddressLegal(self, address): return address >= self.FlashRange[0]", "response_code = self.WriteCommand(\"W %d %d\"%(start, len(data))) RaiseReturnCodeError(response_code, \"Write to RAM\") self.Write(data)#Stream data after", "Check failed\", data_crc, ram_crc) else: break # Check to see if sector is", "def CheckSectorsBlank(self, start: int, end: int) -> bool: assert start <= end response_code", "EraseSector(self, start: int, end: int): response_code = self.WriteCommand(\"E %d %d\"%(start, end)) RaiseReturnCodeError(response_code, \"Erase", "TimeoutError: pass def InitConnection(self): self.ResetSerialConnection() try: try: self.SyncConnection() self.SetCrystalFrequency(self.CrystalFrequency) except (UserWarning, TimeoutError) as", "int) -> int: try: response_code = self.WriteCommand(\"S %d %d\"%(address, 
num_bytes)) except TimeoutError: response_code", "None: #print(out) assert(type(string) is bytes) self.WriteSerial(string) #self.WriteSerial(bytes(self.kNewLine, encoding = \"utf-8\")) ''' Takes the", "vectors intvecs_list = list(intvecs[:vector_table_size]) intvecs_list[vector_table_loc] = 0 # clear csum value csum =", "self.FlashAddressLegal(address) and self.FlashAddressLegal(address + length - 1) and length <= self.FlashRange[1] - self.FlashRange[0]", "def MakeBootable(vector_table_loc: int, orig_image: bytes) -> bytes: vector_table_bytes = GetCheckSumedVectorTable(vector_table_loc, orig_image) image =", "0 return bytes(image_list) # 2s compliment of checksum def CalculateCheckSum(frame) -> int: csum", ": 0x9, \"COMPARE_ERROR\" : 0xa, \"BUSY\" : 0xb, \"PARAM_ERROR\" : 0xc, \"ADDR_ERROR\" :", "\"CRP2\" : 0x87654321, \"CRP3\" : 0x43218765, } kSleepTime = 1 def __init__(self, *args,", "#self.ClearBuffer() #self.Flush() print(\"R %d %d\"%(start, num_bytes)) response_code = self.WriteCommand(\"R %d %d\"%(start, num_bytes)) RaiseReturnCodeError(response_code,", "= 2, mode: int = 0) -> str: assert start < end assert(self.FlashAddressLegal(start)", "print(\"Baudrate set to %d\"%self.baud_rate) except Exception as e: print(e, type(e)) raise def SyncConnection(self):", "self.CheckSectorsBlank(sector, sector) assert sector_blank sleep(self.kSleepTime) self.PrepSectorsForWrite(sector, sector) sleep(self.kSleepTime) print(\"Write to Flash\") self.CopyRAMToFlash(flash_address, ram_address,", "= self.data_buffer_in.popleft() data.append(ch) if len(data) != num_bytes: print(data, len(data), num_bytes) assert len(data) ==", "with open(image_file, 'rb') as f: prog = f.read() image = prog print(\"Program Length:\",", "response_code = self.WriteCommand(\"O\") RaiseReturnCodeError(response_code, \"Read Write FAIM\") def ResetSerialConnection(self): self.Flush() self.Write(bytes(self.kNewLine, encoding=\"utf-8\")) try:", "self.Write(bytes(self.kNewLine, encoding=\"utf-8\")) 
self.ClearBuffer() self.Flush() self.Read() self.ClearBuffer() self.Flush() for _ in range(2): try: self.ReadLine()", "config, stopbit is 1 or 2 ''' response_code = self.WriteCommand(\"B {} {}\".format(baud_rate, stop_bits))", "prog) image = MakeBootable(self.kCheckSumLocation, prog) print(\"Program Length:\", len(prog)) sector_count = int(math.ceil(len(prog)/sector_bytes)) assert sector_count", "self.SectorSizePages*self.kPageSizeBytes assert(len(data) > 0) filled_data = FillDataToFitSector(data, sector_bytes) self.WriteFlashSector(sector, filled_data) sleep(self.kSleepTime) #assert self.ReadSector(sector)", "GetCheckSumedVectorTable(vector_table_loc, orig_image) image = vector_table_bytes + orig_image[len(vector_table_bytes):] return image def FillDataToFitSector(data: bytes, size:", "= self.WriteCommand(\"K\") RaiseReturnCodeError(response_code, \"Read Bootcode Version\") minor = self.ReadLine().strip() major = self.ReadLine().strip() return", "response_code not in (NXPReturnCodes[\"CMD_SUCCESS\"], NXPReturnCodes[\"SECTOR_NOT_BLANK\"]): RaiseReturnCodeError(response_code, \"Blank Check Sectors\") return response_code == NXPReturnCodes[\"CMD_SUCCESS\"]", "0x10, \"INVALID_BAUD_RATE\" : 0x11, \"INVALID_STOP_BIT\" : 0x12, \"CODE_READ_PROTECTION_ENABLED\" : 0x13, \"Unused 1\" :", "bytes(data) def PrepSectorsForWrite(self, start: int, end: int): try: response_code = self.WriteCommand(\"P %d %d\"%(start,", "data after confirmation # i+=kWordSize #when transfer is complete the handler sends OK<CR><LF>", "data read are of different types\") return data == data_read def WriteFlashSector(self, sector:", "NXPReturnCodes.items(): if code == item[1]: return item[0] return \"Not Found\" def RaiseReturnCodeError(code: int,", "to RAM, if so skip try: self.MemoryLocationsEqual(flash_address, ram_address, sector_size_bytes) print(\"Flash already equal to", "int, orig_image: bytes) -> bytes: # make this a valid image by inserting", "= self.ReadLine().strip() major = 
self.ReadLine().strip() return \"%d.%d\"%(int(major), int(minor)) ''' Checks to see if", "words intvecs = struct.unpack(\"<%dI\"%vector_table_size, orig_image[:vector_table_size * kuint32_t_size]) # calculate the checksum over the", "host when enabled ''' if on: command = \"A 1\" else: command =", "self.WriteCommand(\"S %d %d\"%(address, num_bytes)) RaiseReturnCodeError(response_code, \"Read CRC\") return int(self.ReadLine().strip()) def ReadFlashSig(self, start: int,", "-> int: self.Write(bytes(command_string + self.kNewLine, encoding=\"utf-8\")) return self.GetReturnCode() def Unlock(self): ''' Enables Flash", "in range(4): sig.append(self.ReadLine().strip()) return sig def ReadWriteFAIM(self): response_code = self.WriteCommand(\"O\") RaiseReturnCodeError(response_code, \"Read Write", "try: self.ReadLine() except TimeoutError: pass def SetCrystalFrequency(self, frequency_khz: int): self.Write((bytes(\"%d\"%frequency_khz + self.kNewLine, encoding=\"utf-8\")))", "if self.CheckSectorsBlank(sector, self.SectorCount - 1): return sector return self.SectorCount - 1 def ReadSector(self,", "= self.ReadFlashSig(self.FlashRange[0], self.FlashRange[1]) print(\"Flash Signature: %s\"%chip_flash_sig) print(\"Programming Complete.\") def FindFirstBlankSector(self) -> int: for", ": 0x4, \"DST_ADDR_NOT_MAPPED\" : 0x5, \"COUNT_ERROR\" : 0x6, \"INVALID_SECTOR/INVALID_PAGE\" : 0x7, \"SECTOR_NOT_BLANK\" :", "= self.ReadLine()#Should be OK\\r\\n if self.SyncVerified.strip() in frame_in: verified = True break except", "csum value csum = CalculateCheckSum(intvecs_list) intvecs_list[vector_table_loc] = csum vector_table_bytes = b'' for vecval", "len(data)) data_crc = zlib.crc32(data, 0) try: ram_crc = self.ReadCRC(ram_address, num_bytes=len(data)) except Exception: ram_crc", "RaiseReturnCodeError(response_code, \"Unlock\") def SetBaudRate(self, baud_rate: int, stop_bits: int = 1): ''' Baud Depends", "call_name: str) -> None: if int(code) != NXPReturnCodes[\"CMD_SUCCESS\"]: raise 
UserWarning( \"Return Code Failure", "1): ''' Baud Depends of FAIM config, stopbit is 1 or 2 '''", "num_bytes)) response_code = self.WriteCommand(\"R %d %d\"%(start, num_bytes)) RaiseReturnCodeError(response_code, \"Read Memory\") while len(self.data_buffer_in) <", "length - 1) and length <= self.FlashRange[1] - self.FlashRange[0] and address%self.kPageSizeBytes == 0", "# i+=kWordSize #when transfer is complete the handler sends OK<CR><LF> response_code = self.WriteCommand(\"W", "Address: %x\\n\"%(sector, flash_address, ram_address)) assert len(data) == sector_size_bytes #data += bytes(sector_size_bytes - len(data))", "sector_bytes%self.kWordSize == 0 return self.ReadMemory(sector*sector_bytes, sector_bytes) def ReadImage(self, image_file: str): blank_sector = self.FindFirstBlankSector()", "= vector_table_bytes + orig_image[len(vector_table_bytes):] return image def FillDataToFitSector(data: bytes, size: int) -> bytes:", "%d %d\"%(address, num_bytes)) RaiseReturnCodeError(response_code, \"Read CRC\") return int(self.ReadLine().strip()) def ReadFlashSig(self, start: int, end:", "response_code = self.WriteCommand(\"P %d %d\"%(start, end)) except Exception: response_code = self.WriteCommand(\"P %d %d\"%(start,", "= self.ReadLine() if self.SyncString.strip() in frame_in.strip(): synced = True break except TimeoutError: pass", "print(\"Programming Complete.\") def WriteImage(self, image_file: str): sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize == 0", "} kSleepTime = 1 def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.CrystalFrequency = 12000#khz", "int, data: bytes): #assert data sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert(len(data) > 0) filled_data =", "pass def InitConnection(self): self.ResetSerialConnection() try: try: self.SyncConnection() self.SetCrystalFrequency(self.CrystalFrequency) except (UserWarning, TimeoutError) as w:", "the handler sends OK<CR><LF> response_code = 
self.WriteCommand(\"W %d %d\"%(start, len(data))) RaiseReturnCodeError(response_code, \"Write to", "received incorrect amount of data\") if isinstance(type(data), data_read): raise TypeError(\"data written and data", "self.WriteCommand(\"J\") RaiseReturnCodeError(response_code, \"Read Part ID\") resp = self.ReadLine() return int(resp) def ReadBootCodeVersion(self): '''", "int): response_code = self.WriteCommand(\"E %d %d\"%(start, end)) RaiseReturnCodeError(response_code, \"Erase Sectors\") def ErasePages(self, start:", "= 1): ''' Baud Depends of FAIM config, stopbit is 1 or 2", "len(data) == num_bytes return bytes(data) def PrepSectorsForWrite(self, start: int, end: int): try: response_code", "self.Write(bytes(self.kNewLine, encoding=\"utf-8\")) try: self.ReadLine() except TimeoutError: pass def InitConnection(self): self.ResetSerialConnection() try: try: self.SyncConnection()", "was written ''' data_read = self.ReadMemory(flash_address, len(data)) if len(data) != len(data_read): raise ValueError(\"Read", "byte] = 0 return bytes(image_list) # 2s compliment of checksum def CalculateCheckSum(frame) ->", "self.Write(bytes((\"M %d %d %d\"%(address1, address2, num_bytes) + self.kNewLine), encoding=\"utf-8\")) response = self.ReadLine() response_code", "self.RAMSize = 0 self.RAMRange = [0, 0] self.FlashRange = [0, 0] self.RAMStartWrite =", "len(data), num_bytes) assert len(data) == num_bytes return bytes(data) def PrepSectorsForWrite(self, start: int, end:", "UID\") uuids = [ self.ReadLine().strip(), self.ReadLine().strip(), self.ReadLine().strip(), self.ReadLine().strip()] return \" \".join([\"0x%08x\"%int(uid) for uid", "f: for sector in range(blank_sector): print(\"Sector \", sector) f.write(self.ReadSector(sector)) def MassErase(self): last_sector =", "\", sector) f.write(self.ReadSector(sector)) def MassErase(self): last_sector = self.SectorCount - 1 sleep(1) self.ClearBuffer() self.Unlock()", "for i in range(3): try: frame_in = self.ReadLine()#Should be OK\\r\\n 
if self.SyncVerified.strip() in", "def MassErase(self): last_sector = self.SectorCount - 1 sleep(1) self.ClearBuffer() self.Unlock() self.PrepSectorsForWrite(0, last_sector) self.EraseSector(0,", "bool: assert start <= end response_code = self.WriteCommand(\"I %d %d\"%(start, end)) try: self.ReadLine()", "except TimeoutError: pass if not synced: #Check for SyncString raise UserWarning(\"Syncronization Failure\") #self.Flush()", "self.ReadMemory(sector*sector_bytes, sector_bytes) def ReadImage(self, image_file: str): blank_sector = self.FindFirstBlankSector() with open(image_file, 'wb') as", "int, num_bytes: int): self.Write(bytes((\"M %d %d %d\"%(address1, address2, num_bytes) + self.kNewLine), encoding=\"utf-8\")) response", "ram_crc) else: break # Check to see if sector is already equal to", "Complete.\") def FindFirstBlankSector(self) -> int: for sector in range(self.SectorCount): if self.CheckSectorsBlank(sector, self.SectorCount -", "int(math.ceil(len(prog)/sector_bytes)) assert start_sector + sector_count <= self.SectorCount self.Unlock() for sector in reversed(range(start_sector, start_sector", "ReadUID(self): response_code = self.WriteCommand(\"N\") RaiseReturnCodeError(response_code, \"Read UID\") uuids = [ self.ReadLine().strip(), self.ReadLine().strip(), self.ReadLine().strip(),", "%d\"%sector) data_chunk = image[(sector-start_sector) * sector_bytes : (sector - start_sector + 1) *", "self.SyncConnection() self.SetCrystalFrequency(self.CrystalFrequency) except (UserWarning, TimeoutError) as w: print(\"Sync Failed\", w) print(\"Connect to running", "= self.ReadCRC(ram_address, num_bytes=len(data)) except Exception: ram_crc = self.ReadCRC(ram_address, num_bytes=len(data)) while ram_crc != data_crc:", "and length <= self.RAMRange[1] - self.RAMRange[0] and address%self.kWordSize == 0 def GetReturnCode(self) ->", "bytes) -> None: #print(out) assert(type(string) is bytes) self.WriteSerial(string) #self.WriteSerial(bytes(self.kNewLine, encoding = \"utf-8\")) 
'''", "0 return self.ReadMemory(sector*sector_bytes, sector_bytes) def ReadImage(self, image_file: str): blank_sector = self.FindFirstBlankSector() with open(image_file,", "def AssertReturnCode(self, call_name: str) -> None: ''' Get a return code with no", "end of the transferr data = [] while self.data_buffer_in: ch = self.data_buffer_in.popleft() data.append(ch)", "prog) print(\"Program Length:\", len(prog)) sector_count = int(math.ceil(len(prog)/sector_bytes)) assert sector_count <= self.SectorCount for sector", "response_code = self.WriteCommand(\"U 23130\") RaiseReturnCodeError(response_code, \"Unlock\") def SetBaudRate(self, baud_rate: int, stop_bits: int =", "CRCValues = { \"NO_ISP\": 0x4e697370, \"CRP1\" : 0x12345678, \"CRP2\" : 0x87654321, \"CRP3\" :", "verified = False for i in range(3): try: frame_in = self.ReadLine()#Should be OK\\r\\n", "{} {}\".format(call_name, GetErrorCodeName(code), code)) def RemoveBootableCheckSum(vector_table_loc: int, image: bytes): kuint32_t_size = 4 MakeBootable(vector_table_loc,", "address, length): print(self.FlashRange, address, length) return self.FlashAddressLegal(address) and self.FlashAddressLegal(address + length - 1)", "confirmation # i+=kWordSize #when transfer is complete the handler sends OK<CR><LF> response_code =", "open(image_file, 'rb') as f: prog = f.read() image = prog print(\"Program Length:\", len(prog))", "= False for i in range(3): try: frame_in = self.ReadLine()#Should be OK\\r\\n if", "\"USER_CODE_CHECKSUM\" : 0x15, \"Unused 2\" : 0x16, \"EFRO_NO_POWER\" : 0x17, \"FLASH_NO_POWER\" : 0x18,", "self.AssertReturnCode(\"Write to RAM\")#get confirmation # self.Write(data[i:i+kWordSize])#Stream data after confirmation # i+=kWordSize #when transfer", "are equal ''' def MemoryLocationsEqual(self, address1: int, address2: int, num_bytes: int): self.Write(bytes((\"M %d", "= True break except TimeoutError: pass if not synced: #Check for SyncString raise", "int): try: response_code = self.WriteCommand(\"P %d 
%d\"%(start, end)) except Exception: response_code = self.WriteCommand(\"P", "\"Unlock\") def SetBaudRate(self, baud_rate: int, stop_bits: int = 1): ''' Baud Depends of", "Baud Depends of FAIM config, stopbit is 1 or 2 ''' response_code =", "= bytes(\"OK\"+ISPChip.kNewLine, encoding=\"utf-8\") ReturnCodes = NXPReturnCodes CRCLocation = 0x000002fc CRCValues = { \"NO_ISP\":", "except TimeoutError: pass if not verified: raise UserWarning(\"Verification Failure\") print(\"Syncronization Successful\") def ClearSerialConnection(self):", "self.ReadLine() self.Flush() self.ClearBuffer() except TimeoutError: pass uid = self.ReadUID() print(\"Part UID: %s\"%uid) boot_code_version", "int) -> bool: ''' Read Memory and compare it to what was written", "\"PARAM_ERROR\" : 0xc, \"ADDR_ERROR\" : 0xd, \"ADDR_NOT_MAPPED\" : 0xe, \"CMD_LOCKED\" : 0xf, \"INVALID_CODE\"", "== 0 #make not bootable self.Unlock() self.WriteSector(0, bytes([0xde]*sector_bytes)) with open(image_file, 'rb') as f:", "+ 1 #Parity = None #DataBits = 8 #StopBits = 1 SyncString =", "print(\"Sync Failed\", w) print(\"Connect to running ISP\") self.ClearSerialConnection() self.Echo(False) try: self.ReadLine() self.Flush() self.ClearBuffer()", "%d\"%self.baud_rate) except Exception as e: print(e, type(e)) raise def SyncConnection(self): synced = False", "bytes(SyncString, encoding=\"utf-8\") SyncVerified = bytes(\"OK\"+ISPChip.kNewLine, encoding=\"utf-8\") ReturnCodes = NXPReturnCodes CRCLocation = 0x000002fc CRCValues", "(csum % (1<<32)) def Crc32(frame) -> int: #CRC32 polynomial = 0x104c11db6 crc =", "valid image by inserting a checksum in the correct place vector_table_size = 8", "%d %d\"%(start, num_bytes)) RaiseReturnCodeError(response_code, \"Read Memory\") while len(self.data_buffer_in) < (num_bytes): self.Read() # Command", "print(\"Write to RAM %d bytes\"%len(data)) #while i < len(data): # self.Write(\"W %d %d\"%(start", "= self.FlashRange[0] + sector*sector_size_bytes print(\"\\nWriting Sector: %d\\nFlash 
Address: %x\\nRAM Address: %x\\n\"%(sector, flash_address, ram_address))", "assert self.FlashRangeLegal(flash_address, num_bytes) response_code = self.WriteCommand(\"C %d %d %d\"%(flash_address, ram_address, num_bytes)) RaiseReturnCodeError(response_code, \"Copy", "equal to RAM, if so skip try: self.MemoryLocationsEqual(flash_address, ram_address, sector_size_bytes) print(\"Flash already equal", "= 0) -> str: assert start < end assert(self.FlashAddressLegal(start) and self.FlashAddressLegal(end)) response_code =", "f: prog = f.read() image = prog print(\"Program Length:\", len(prog)) sector_count = int(math.ceil(len(prog)/sector_bytes))", "checksum over the interrupt vectors intvecs_list = list(intvecs[:vector_table_size]) intvecs_list[vector_table_loc] = 0 # clear", "1) and length <= self.FlashRange[1] - self.FlashRange[0] and address%self.kPageSizeBytes == 0 def RamAddressLegal(self,", "def Write(self, string : bytes) -> None: #print(out) assert(type(string) is bytes) self.WriteSerial(string) #self.WriteSerial(bytes(self.kNewLine,", "sector_count = int(math.ceil(len(prog)/sector_bytes)) assert sector_count <= self.SectorCount for sector in reversed(range(sector_count)): print(\"\\nWriting Sector", "kuint32_t_size = 4 # Make byte array into list of little endian 32", "size: data += bytes([0xff] *(size - len(data))) return data class NXPChip(ISPChip): kWordSize =", "0 self.RAMSize = 0 self.RAMRange = [0, 0] self.FlashRange = [0, 0] self.RAMStartWrite", "command string, return the response code ''' def WriteCommand(self, command_string: str) -> int:", "f.read() image = prog print(\"Program Length:\", len(prog)) sector_count = int(math.ceil(len(prog)/sector_bytes)) assert start_sector +", "image_file: str): blank_sector = self.FindFirstBlankSector() with open(image_file, 'wb') as f: for sector in", "- 1 def ReadSector(self, sector: int) -> bytes: sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize", 
"assert(self.FlashAddressLegal(start) and self.FlashAddressLegal(end)) response_code = self.WriteCommand(\"Z %d %d %d %d\"%(start, end, wait_states, mode))", "-> bytes: if len(data) != size: data += bytes([0xff] *(size - len(data))) return", "bytes\"%len(data)) #while i < len(data): # self.Write(\"W %d %d\"%(start + i, kWordSize)) #", "len(prog)) sector_count = int(math.ceil(len(prog)/sector_bytes)) assert start_sector + sector_count <= self.SectorCount self.Unlock() for sector", "- self.RAMRange[0] and address%self.kWordSize == 0 def GetReturnCode(self) -> int: for _ in", "raise TypeError(\"data written and data read are of different types\") return data ==", "call_name) def Write(self, string : bytes) -> None: #print(out) assert(type(string) is bytes) self.WriteSerial(string)", "be OK\\r\\n if self.SyncVerified.strip() in frame_in: verified = True break except TimeoutError: pass", "self.WriteCommand(\"O\") RaiseReturnCodeError(response_code, \"Read Write FAIM\") def ResetSerialConnection(self): self.Flush() self.Write(bytes(self.kNewLine, encoding=\"utf-8\")) try: self.ReadLine() except", "Start executing code at the specified spot ''' mode = \"\" if thumb_mode:", "sector: int) -> bytes: sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize == 0 return self.ReadMemory(sector*sector_bytes,", "SyncString try: frame_in = self.ReadLine()#discard echo except TimeoutError: pass verified = False for", "ch = self.data_buffer_in.popleft() data.append(ch) if len(data) != num_bytes: print(data, len(data), num_bytes) assert len(data)", "uuids]) def ReadCRC(self, address: int, num_bytes: int) -> int: try: response_code = self.WriteCommand(\"S", "Complete.\") def WriteImage(self, image_file: str): sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize == 0 #make", "Found\" def RaiseReturnCodeError(code: int, call_name: str) -> None: if int(code) != NXPReturnCodes[\"CMD_SUCCESS\"]: raise", 
"self.ReadLine().strip() print(\"Check Sectors Blank response\", response) except TimeoutError: pass if response_code not in", "prog = f.read() #image = RemoveBootableCheckSum(self.kCheckSumLocation, prog) image = MakeBootable(self.kCheckSumLocation, prog) print(\"Program Length:\",", "2\" : 0x16, \"EFRO_NO_POWER\" : 0x17, \"FLASH_NO_POWER\" : 0x18, \"Unused 3\" : 0x19,", "\"Unused 4\" : 0x1a, \"FLASH_NO_CLOCK\" : 0x1b, \"REINVOKE_ISP_CONFIG\" : 0x1c, \"NO_VALID_IMAGE\" : 0x1d,", "- 1 sleep(1) self.ClearBuffer() self.Unlock() self.PrepSectorsForWrite(0, last_sector) self.EraseSector(0, last_sector) print(\"Checking Sectors are blank\")", "except TimeoutError: pass verified = False for i in range(3): try: frame_in =", "the boot version seems to be Minor then Major not like the docs", "two sections in the memory map are equal ''' def MemoryLocationsEqual(self, address1: int,", "-> int: for sector in range(self.SectorCount): if self.CheckSectorsBlank(sector, self.SectorCount - 1): return sector", "orig_image: bytes) -> bytes: vector_table_bytes = GetCheckSumedVectorTable(vector_table_loc, orig_image) image = vector_table_bytes + orig_image[len(vector_table_bytes):]", "Crc from .ISPChip import ISPChip NXPReturnCodes = { \"CMD_SUCCESS\" : 0x0, \"INVALID_COMMAND\" :", "zlib from time import sleep import struct from timeout_decorator import timeout from timeout_decorator.timeout_decorator", "FAIM\") def ResetSerialConnection(self): self.Flush() self.Write(bytes(self.kNewLine, encoding=\"utf-8\")) try: self.ReadLine() except TimeoutError: pass def InitConnection(self):", "if not synced: #Check for SyncString raise UserWarning(\"Syncronization Failure\") #self.Flush() self.Write(self.SyncStringBytes)#echo SyncString try:", "end: int): try: response_code = self.WriteCommand(\"P %d %d\"%(start, end)) except Exception: response_code =", "start: int, num_bytes: int): assert num_bytes%self.kWordSize == 0 assert self.RamRangeLegal(start, num_bytes) print(\"ReadMemory\") 
#self.Flush()", "self.Unlock() self.WriteSector(0, bytes([0xde]*sector_bytes)) with open(image_file, 'rb') as f: prog = f.read() #image =", "bytes): kuint32_t_size = 4 MakeBootable(vector_table_loc, image) image_list = list(image) for byte in range(kuint32_t_size):", "end: int, wait_states: int = 2, mode: int = 0) -> str: assert", "print(\"\\nWriting Sector %d\"%sector) data_chunk = image[(sector-start_sector) * sector_bytes : (sector - start_sector +", "- 1) and length <= self.FlashRange[1] - self.FlashRange[0] and address%self.kPageSizeBytes == 0 def", "def CalculateCheckSum(frame) -> int: csum = 0 for entry in frame: csum +=", "= image[(sector-start_sector) * sector_bytes : (sector - start_sector + 1) * sector_bytes] self.WriteSector(sector,", "kWordSize = 4 kPageSizeBytes = 64 SectorSizePages = 16 MaxByteTransfer = 1024 StatusRespLength", "as w: print(\"Sync Failed\", w) print(\"Connect to running ISP\") self.ClearSerialConnection() self.Echo(False) try: self.ReadLine()", "Exception: ram_crc = self.ReadCRC(ram_address, num_bytes=len(data)) while ram_crc != data_crc: sleep(self.kSleepTime) self.WriteToRam(ram_address, data) sleep(self.kSleepTime)", "end: int): response_code = self.WriteCommand(\"X %d %d\"%(start, end)) RaiseReturnCodeError(response_code, \"Erase Pages\") def CheckSectorsBlank(self,", "i, kWordSize)) # self.AssertReturnCode(\"Write to RAM\")#get confirmation # self.Write(data[i:i+kWordSize])#Stream data after confirmation #", "i in range(5): self.Write(bytes('?'*15, encoding=\"utf-8\")) #self.Write('?' 
+ self.kNewLine) try: frame_in = self.ReadLine() if", "self.GetReturnCode() RaiseReturnCodeError(code, call_name) def Write(self, string : bytes) -> None: #print(out) assert(type(string) is", "<= self.RAMRange[1] - self.RAMRange[0] and address%self.kWordSize == 0 def GetReturnCode(self) -> int: for", "def RamAddressLegal(self, address): return address >= self.RAMRange[0] and address <= self.RAMRange[1] def RamRangeLegal(self,", "import Crc from .ISPChip import ISPChip NXPReturnCodes = { \"CMD_SUCCESS\" : 0x0, \"INVALID_COMMAND\"", "int, thumb_mode: bool = False): ''' Start executing code at the specified spot", "= 7 #0x0000001c def FlashAddressLegal(self, address): return address >= self.FlashRange[0] and address <=", "see if two sections in the memory map are equal ''' def MemoryLocationsEqual(self,", "try: self.MemoryLocationsEqual(flash_address, ram_address, sector_size_bytes) print(\"Flash already equal to RAM, skipping write\") return except:", "data_read = self.ReadMemory(flash_address, len(data)) if len(data) != len(data_read): raise ValueError(\"Read Memory received incorrect", "and compare it to what was written ''' data_read = self.ReadMemory(flash_address, len(data)) if", "\"Blank Check Sectors\") return response_code == NXPReturnCodes[\"CMD_SUCCESS\"] def ReadPartID(self): response_code = self.WriteCommand(\"J\") RaiseReturnCodeError(response_code,", "bytes): #assert data sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert(len(data) > 0) filled_data = FillDataToFitSector(data, sector_bytes)", "vector_table_bytes def MakeBootable(vector_table_loc: int, orig_image: bytes) -> bytes: vector_table_bytes = GetCheckSumedVectorTable(vector_table_loc, orig_image) image", "[0, 0] self.FlashRange = [0, 0] self.RAMStartWrite = 0 self.kCheckSumLocation = 7 #0x0000001c", "data_read def WriteFlashSector(self, sector: int, data: bytes): ram_address = self.RAMStartWrite sector_size_bytes = self.kPageSizeBytes*self.SectorSizePages", "self.ReadLine().strip() 
major = self.ReadLine().strip() return \"%d.%d\"%(int(major), int(minor)) ''' Checks to see if two", "self.CrystalFrequency = 12000#khz == 30MHz self.SectorCount = 0 self.RAMSize = 0 self.RAMRange =", "docs say ''' response_code = self.WriteCommand(\"K\") RaiseReturnCodeError(response_code, \"Read Bootcode Version\") minor = self.ReadLine().strip()", ": 0x43218765, } kSleepTime = 1 def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.CrystalFrequency", "= [] for i in range(4): sig.append(self.ReadLine().strip()) return sig def ReadWriteFAIM(self): response_code =", "#while i < len(data): # self.Write(\"W %d %d\"%(start + i, kWordSize)) # self.AssertReturnCode(\"Write", "end)) RaiseReturnCodeError(response_code, \"Prep Sectors\") def CopyRAMToFlash(self, flash_address: int, ram_address: int, num_bytes: int): assert", "MakeBootable(vector_table_loc: int, orig_image: bytes) -> bytes: vector_table_bytes = GetCheckSumedVectorTable(vector_table_loc, orig_image) image = vector_table_bytes", "\"BUSY\" : 0xb, \"PARAM_ERROR\" : 0xc, \"ADDR_ERROR\" : 0xd, \"ADDR_NOT_MAPPED\" : 0xe, \"CMD_LOCKED\"", "pass if not synced: #Check for SyncString raise UserWarning(\"Syncronization Failure\") #self.Flush() self.Write(self.SyncStringBytes)#echo SyncString", "sector_bytes) def ReadImage(self, image_file: str): blank_sector = self.FindFirstBlankSector() with open(image_file, 'wb') as f:", "string, return the response code ''' def WriteCommand(self, command_string: str) -> int: self.Write(bytes(command_string", "GetCheckSumedVectorTable(vector_table_loc: int, orig_image: bytes) -> bytes: # make this a valid image by", "sector: int, data: bytes): ram_address = self.RAMStartWrite sector_size_bytes = self.kPageSizeBytes*self.SectorSizePages flash_address = self.FlashRange[0]", "FlashRangeLegal(self, address, length): print(self.FlashRange, address, length) return self.FlashAddressLegal(address) and self.FlashAddressLegal(address + length -", "#StopBits = 1 SyncString 
= \"Synchronized\"+ISPChip.kNewLine SyncStringBytes = bytes(SyncString, encoding=\"utf-8\") SyncVerified = bytes(\"OK\"+ISPChip.kNewLine,", "self.ClearSerialConnection() self.Flush() for i in range(5): self.Write(bytes('?'*15, encoding=\"utf-8\")) #self.Write('?' + self.kNewLine) try: frame_in", "''' self.ClearBuffer() response_code = self.WriteCommand(\"U 23130\") RaiseReturnCodeError(response_code, \"Unlock\") def SetBaudRate(self, baud_rate: int, stop_bits:", "\"INVALID_COMMAND\" : 0x1, \"SRC_ADDR_ERROR\" : 0x2, \"DST_ADDR_ERROR\" : 0x3, \"SRC_ADDR_NOT_MAPPED\" : 0x4, \"DST_ADDR_NOT_MAPPED\"", "def RamRangeLegal(self, address, length): return self.RamAddressLegal(address) and self.RamAddressLegal(address + length) and length <=", "%d\"%(start, end)) except Exception: response_code = self.WriteCommand(\"P %d %d\"%(start, end)) RaiseReturnCodeError(response_code, \"Prep Sectors\")", "\"SRC_ADDR_ERROR\" : 0x2, \"DST_ADDR_ERROR\" : 0x3, \"SRC_ADDR_NOT_MAPPED\" : 0x4, \"DST_ADDR_NOT_MAPPED\" : 0x5, \"COUNT_ERROR\"", "is already equal to RAM, if so skip try: self.MemoryLocationsEqual(flash_address, ram_address, sector_size_bytes) print(\"Flash", "Exception as e: print(e, type(e)) raise def SyncConnection(self): synced = False self.ClearSerialConnection() self.Flush()", "def GetReturnCode(self) -> int: for _ in range(10): #sleep(.1) try: resp = self.ReadLine().strip()", "return bytes(data) def PrepSectorsForWrite(self, start: int, end: int): try: response_code = self.WriteCommand(\"P %d", "NXPReturnCodes[\"CMD_SUCCESS\"] def ReadUID(self): response_code = self.WriteCommand(\"N\") RaiseReturnCodeError(response_code, \"Read UID\") uuids = [ self.ReadLine().strip(),", "AssertReturnCode(self, call_name: str) -> None: ''' Get a return code with no response", "image_file: str): sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize == 0 #make not bootable self.Unlock()", "sleep(self.kSleepTime) #assert self.ReadSector(sector) == data_chunk 
def WriteBinaryToFlash(self, image_file: str, start_sector: int): sector_bytes =", "sleep(self.kSleepTime) self.WriteToRam(ram_address, data) sleep(self.kSleepTime) ram_crc = self.ReadCRC(ram_address, num_bytes=len(data)) if data_crc != ram_crc: print(\"CRC", "response_code = self.WriteCommand(\"E %d %d\"%(start, end)) RaiseReturnCodeError(response_code, \"Erase Sectors\") def ErasePages(self, start: int,", "raise UserWarning( \"Return Code Failure in {} {} {}\".format(call_name, GetErrorCodeName(code), code)) def RemoveBootableCheckSum(vector_table_loc:", "code = self.GetReturnCode() RaiseReturnCodeError(code, call_name) def Write(self, string : bytes) -> None: #print(out)", "assert self.RamRangeLegal(start, num_bytes) print(\"ReadMemory\") #self.Flush() #self.Read() #self.ClearBuffer() #self.Flush() print(\"R %d %d\"%(start, num_bytes)) response_code", "%d %s\"%(address, mode)) RaiseReturnCodeError(response_code, \"Go\") def EraseSector(self, start: int, end: int): response_code =", "''' def MemoryLocationsEqual(self, address1: int, address2: int, num_bytes: int): self.Write(bytes((\"M %d %d %d\"%(address1,", "raise def SyncConnection(self): synced = False self.ClearSerialConnection() self.Flush() for i in range(5): self.Write(bytes('?'*15,", "1) * sector_bytes] self.WriteSector(sector, data_chunk) chip_flash_sig = self.ReadFlashSig(self.FlashRange[0], self.FlashRange[1]) print(\"Flash Signature: %s\"%chip_flash_sig) print(\"Programming", "self.WriteCommand(\"P %d %d\"%(start, end)) RaiseReturnCodeError(response_code, \"Prep Sectors\") def CopyRAMToFlash(self, flash_address: int, ram_address: int,", "ReadFlashSig(self, start: int, end: int, wait_states: int = 2, mode: int = 0)", "self.ReadLine().strip()] return \" \".join([\"0x%08x\"%int(uid) for uid in uuids]) def ReadCRC(self, address: int, num_bytes:", "sector*sector_size_bytes print(\"\\nWriting Sector: %d\\nFlash Address: %x\\nRAM Address: %x\\n\"%(sector, flash_address, ram_address)) assert len(data) ==", 
"data\") if isinstance(type(data), data_read): raise TypeError(\"data written and data read are of different", "item[0] return \"Not Found\" def RaiseReturnCodeError(code: int, call_name: str) -> None: if int(code)", "\"CMD_SUCCESS\" : 0x0, \"INVALID_COMMAND\" : 0x1, \"SRC_ADDR_ERROR\" : 0x2, \"DST_ADDR_ERROR\" : 0x3, \"SRC_ADDR_NOT_MAPPED\"", "Flash\") self.CopyRAMToFlash(flash_address, ram_address, sector_size_bytes) sleep(self.kSleepTime) flash_crc = self.ReadCRC(flash_address, num_bytes=len(data)) assert flash_crc == data_crc", "CRCLocation = 0x000002fc CRCValues = { \"NO_ISP\": 0x4e697370, \"CRP1\" : 0x12345678, \"CRP2\" :", "''' code = self.GetReturnCode() RaiseReturnCodeError(code, call_name) def Write(self, string : bytes) -> None:", "TimeoutError from pycrc.algorithms import Crc from .ISPChip import ISPChip NXPReturnCodes = { \"CMD_SUCCESS\"", "int: #CRC32 polynomial = 0x104c11db6 crc = Crc(width=32, poly=polynomial, reflect_in=True, xor_in=(1<<32)-1, reflect_out=True, xor_out=0x00)", "Memory and compare it to what was written ''' data_read = self.ReadMemory(flash_address, len(data))", "self.WriteCommand(\"P %d %d\"%(start, end)) except Exception: response_code = self.WriteCommand(\"P %d %d\"%(start, end)) RaiseReturnCodeError(response_code,", "in frame_in: verified = True break except TimeoutError: pass if not verified: raise", "address <= self.FlashRange[1]; def FlashRangeLegal(self, address, length): print(self.FlashRange, address, length) return self.FlashAddressLegal(address) and", "__init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.CrystalFrequency = 12000#khz == 30MHz self.SectorCount = 0", "%d %d\"%(flash_address, ram_address, num_bytes)) RaiseReturnCodeError(response_code, \"Copy RAM To Flash\") #sleep(.2) def Go(self, address:", "self.SectorCount = 0 self.RAMSize = 0 self.RAMRange = [0, 0] self.FlashRange = [0,", "start_sector: int): sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize == 
0 with open(image_file, 'rb') as", "self.WriteCommand(\"W %d %d\"%(start, len(data))) RaiseReturnCodeError(response_code, \"Write to RAM\") self.Write(data)#Stream data after confirmation #self.Write(\"OK\"+self.kNewLine)", "= \"utf-8\")) ''' Takes the command string, return the response code ''' def", "1 SyncString = \"Synchronized\"+ISPChip.kNewLine SyncStringBytes = bytes(SyncString, encoding=\"utf-8\") SyncVerified = bytes(\"OK\"+ISPChip.kNewLine, encoding=\"utf-8\") ReturnCodes", "self.WriteCommand(\"U 23130\") RaiseReturnCodeError(response_code, \"Unlock\") def SetBaudRate(self, baud_rate: int, stop_bits: int = 1): '''", "def ReadBootCodeVersion(self): ''' LPC84x sends a 0x1a first for some reason. Also the", "image def FillDataToFitSector(data: bytes, size: int) -> bytes: if len(data) != size: data", "\"COMPARE_ERROR\" : 0xa, \"BUSY\" : 0xb, \"PARAM_ERROR\" : 0xc, \"ADDR_ERROR\" : 0xd, \"ADDR_NOT_MAPPED\"", "self.ClearSerialConnection() self.Echo(False) try: self.ReadLine() self.Flush() self.ClearBuffer() except TimeoutError: pass uid = self.ReadUID() print(\"Part", "self.WriteCommand(\"Z %d %d %d %d\"%(start, end, wait_states, mode)) RaiseReturnCodeError(response_code, \"Read Flash Signature\") sig", "-> None: ''' Get a return code with no response ''' code =", "sector in range(blank_sector): print(\"Sector \", sector) f.write(self.ReadSector(sector)) def MassErase(self): last_sector = self.SectorCount -", "'T' response_code = self.WriteCommand(\"G %d %s\"%(address, mode)) RaiseReturnCodeError(response_code, \"Go\") def EraseSector(self, start: int,", "self.WriteCommand(\"I %d %d\"%(start, end)) try: self.ReadLine() response = self.ReadLine().strip() print(\"Check Sectors Blank response\",", "correct place vector_table_size = 8 kuint32_t_size = 4 # Make byte array into", "RAM %d bytes\"%len(data)) #while i < len(data): # self.Write(\"W %d %d\"%(start + i,", "WriteCommand(self, command_string: str) -> int: self.Write(bytes(command_string + self.kNewLine, 
encoding=\"utf-8\")) return self.GetReturnCode() def Unlock(self):", "except Exception: response_code = self.WriteCommand(\"P %d %d\"%(start, end)) RaiseReturnCodeError(response_code, \"Prep Sectors\") def CopyRAMToFlash(self,", "equal ''' def MemoryLocationsEqual(self, address1: int, address2: int, num_bytes: int): self.Write(bytes((\"M %d %d", "= 4 MakeBootable(vector_table_loc, image) image_list = list(image) for byte in range(kuint32_t_size): image_list[vector_table_loc *", "range(kuint32_t_size): image_list[vector_table_loc * kuint32_t_size + byte] = 0 return bytes(image_list) # 2s compliment", "= 8 kuint32_t_size = 4 # Make byte array into list of little", "read are of different types\") return data == data_read def WriteFlashSector(self, sector: int,", "range(4): sig.append(self.ReadLine().strip()) return sig def ReadWriteFAIM(self): response_code = self.WriteCommand(\"O\") RaiseReturnCodeError(response_code, \"Read Write FAIM\")", "1\" else: command = \"A 0\" response_code = self.WriteCommand(command) RaiseReturnCodeError(response_code, \"Set Echo\") def", ": 0x12, \"CODE_READ_PROTECTION_ENABLED\" : 0x13, \"Unused 1\" : 0x14, \"USER_CODE_CHECKSUM\" : 0x15, \"Unused", ": 0x1d, \"FAIM_NO_POWER\" : 0x1e, \"FAIM_NO_CLOCK\" : 0x1f, \"NoStatusResponse\" : 0xff, } def", "32 bit words intvecs = struct.unpack(\"<%dI\"%vector_table_size, orig_image[:vector_table_size * kuint32_t_size]) # calculate the checksum", "\"REINVOKE_ISP_CONFIG\" : 0x1c, \"NO_VALID_IMAGE\" : 0x1d, \"FAIM_NO_POWER\" : 0x1e, \"FAIM_NO_CLOCK\" : 0x1f, \"NoStatusResponse\"", "Go(self, address: int, thumb_mode: bool = False): ''' Start executing code at the", "str) -> None: if int(code) != NXPReturnCodes[\"CMD_SUCCESS\"]: raise UserWarning( \"Return Code Failure in", "entry in frame: csum += entry return (1<<32) - (csum % (1<<32)) def", "= b'' for vecval in intvecs_list: vector_table_bytes += struct.pack(\"<I\", vecval) return vector_table_bytes def", "<= self.FlashRange[1] - self.FlashRange[0] and 
address%self.kPageSizeBytes == 0 def RamAddressLegal(self, address): return address", "-> None: if int(code) != NXPReturnCodes[\"CMD_SUCCESS\"]: raise UserWarning( \"Return Code Failure in {}", "frequency_khz: int): self.Write((bytes(\"%d\"%frequency_khz + self.kNewLine, encoding=\"utf-8\"))) verified = False for i in range(3):", "data sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert(len(data) > 0) filled_data = FillDataToFitSector(data, sector_bytes) self.WriteFlashSector(sector, filled_data)", "in the memory map are equal ''' def MemoryLocationsEqual(self, address1: int, address2: int,", "and address <= self.FlashRange[1]; def FlashRangeLegal(self, address, length): print(self.FlashRange, address, length) return self.FlashAddressLegal(address)", "LPC84x sends a 0x1a first for some reason. Also the boot version seems", "return self.ReturnCodes[\"NoStatusResponse\"] def AssertReturnCode(self, call_name: str) -> None: ''' Get a return code", "complete the handler sends OK<CR><LF> response_code = self.WriteCommand(\"W %d %d\"%(start, len(data))) RaiseReturnCodeError(response_code, \"Write", "Code Version: %s\"%boot_code_version) self.SetBaudRate(self.baud_rate) print(\"Baudrate set to %d\"%self.baud_rate) except Exception as e: print(e,", "0xff, } def GetErrorCodeName(code: int) -> str: code = int(code) for item in", "b'' for vecval in intvecs_list: vector_table_bytes += struct.pack(\"<I\", vecval) return vector_table_bytes def MakeBootable(vector_table_loc:", "self.ClearBuffer() self.Flush() for _ in range(2): try: self.ReadLine() except TimeoutError: pass def SetCrystalFrequency(self,", "not verified: raise UserWarning(\"Verification Failure\") def CheckFlashWrite(self, data, flash_address: int) -> bool: '''", "== num_bytes return bytes(data) def PrepSectorsForWrite(self, start: int, end: int): try: response_code =", "= self.FindFirstBlankSector() with open(image_file, 'wb') as f: for sector in range(blank_sector): print(\"Sector \",", ": (sector - 
start_sector + 1) * sector_bytes] self.WriteSector(sector, data_chunk) sleep(1) chip_flash_sig =", "at the specified spot ''' mode = \"\" if thumb_mode: mode = 'T'", "0x104c11db6 crc = Crc(width=32, poly=polynomial, reflect_in=True, xor_in=(1<<32)-1, reflect_out=True, xor_out=0x00) crc_calc = crc.bit_by_bit(frame) return", "crc.bit_by_bit(frame) return crc_calc def GetCheckSumedVectorTable(vector_table_loc: int, orig_image: bytes) -> bytes: # make this", "def ReadUID(self): response_code = self.WriteCommand(\"N\") RaiseReturnCodeError(response_code, \"Read UID\") uuids = [ self.ReadLine().strip(), self.ReadLine().strip(),", "sector_size_bytes) def WriteSector(self, sector: int, data: bytes): #assert data sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert(len(data)", "self.RAMStartWrite = 0 self.kCheckSumLocation = 7 #0x0000001c def FlashAddressLegal(self, address): return address >=", "\"INVALID_SECTOR/INVALID_PAGE\" : 0x7, \"SECTOR_NOT_BLANK\" : 0x8, \"SECTOR_NOT_PREPARED_FOR_WRITE_OPERATION\" : 0x9, \"COMPARE_ERROR\" : 0xa, \"BUSY\"", "RaiseReturnCodeError(response_code, \"Prep Sectors\") def CopyRAMToFlash(self, flash_address: int, ram_address: int, num_bytes: int): assert self.RamRangeLegal(ram_address,", "in uuids]) def ReadCRC(self, address: int, num_bytes: int) -> int: try: response_code =", "int = 2, mode: int = 0) -> str: assert start < end", "i+=kWordSize #when transfer is complete the handler sends OK<CR><LF> response_code = self.WriteCommand(\"W %d", "= True break except TimeoutError: pass if not verified: raise UserWarning(\"Verification Failure\") def", "= GetCheckSumedVectorTable(vector_table_loc, orig_image) image = vector_table_bytes + orig_image[len(vector_table_bytes):] return image def FillDataToFitSector(data: bytes,", "int) -> str: code = int(code) for item in NXPReturnCodes.items(): if code ==", "in range(kuint32_t_size): image_list[vector_table_loc * kuint32_t_size + byte] = 0 return bytes(image_list) # 2s", "raise 
UserWarning(\"Syncronization Failure\") #self.Flush() self.Write(self.SyncStringBytes)#echo SyncString try: frame_in = self.ReadLine()#discard echo except TimeoutError:", "end, wait_states, mode)) RaiseReturnCodeError(response_code, \"Read Flash Signature\") sig = [] for i in", "f.read() #image = RemoveBootableCheckSum(self.kCheckSumLocation, prog) image = MakeBootable(self.kCheckSumLocation, prog) print(\"Program Length:\", len(prog)) sector_count", "sector_bytes%self.kWordSize == 0 #make not bootable self.Unlock() self.WriteSector(0, bytes([0xde]*sector_bytes)) with open(image_file, 'rb') as", "flash_address, ram_address)) assert len(data) == sector_size_bytes #data += bytes(sector_size_bytes - len(data)) data_crc =", "different types\") return data == data_read def WriteFlashSector(self, sector: int, data: bytes): ram_address", "if isinstance(type(data), data_read): raise TypeError(\"data written and data read are of different types\")", "0x1f, \"NoStatusResponse\" : 0xff, } def GetErrorCodeName(code: int) -> str: code = int(code)", "Enables Flash Write, Erase, & Go ''' self.ClearBuffer() response_code = self.WriteCommand(\"U 23130\") RaiseReturnCodeError(response_code,", "0x12, \"CODE_READ_PROTECTION_ENABLED\" : 0x13, \"Unused 1\" : 0x14, \"USER_CODE_CHECKSUM\" : 0x15, \"Unused 2\"", "int, num_bytes: int): assert self.RamRangeLegal(ram_address, num_bytes) assert self.FlashRangeLegal(flash_address, num_bytes) response_code = self.WriteCommand(\"C %d", "0x2, \"DST_ADDR_ERROR\" : 0x3, \"SRC_ADDR_NOT_MAPPED\" : 0x4, \"DST_ADDR_NOT_MAPPED\" : 0x5, \"COUNT_ERROR\" : 0x6,", "{} {} {}\".format(call_name, GetErrorCodeName(code), code)) def RemoveBootableCheckSum(vector_table_loc: int, image: bytes): kuint32_t_size = 4", "raise ValueError(\"Read Memory received incorrect amount of data\") if isinstance(type(data), data_read): raise TypeError(\"data", "* kuint32_t_size + byte] = 0 return bytes(image_list) # 2s compliment of checksum", "0x1a, \"FLASH_NO_CLOCK\" : 0x1b, 
\"REINVOKE_ISP_CONFIG\" : 0x1c, \"NO_VALID_IMAGE\" : 0x1d, \"FAIM_NO_POWER\" : 0x1e,", "int, end: int): try: response_code = self.WriteCommand(\"P %d %d\"%(start, end)) except Exception: response_code", "\"Read Write FAIM\") def ResetSerialConnection(self): self.Flush() self.Write(bytes(self.kNewLine, encoding=\"utf-8\")) try: self.ReadLine() except TimeoutError: pass", "sector_size_bytes #data += bytes(sector_size_bytes - len(data)) data_crc = zlib.crc32(data, 0) try: ram_crc =", "RAM\") self.Write(data)#Stream data after confirmation #self.Write(\"OK\"+self.kNewLine) try: print(self.ReadLine()) except TimeoutError: return @timeout(4) def", "sig def ReadWriteFAIM(self): response_code = self.WriteCommand(\"O\") RaiseReturnCodeError(response_code, \"Read Write FAIM\") def ResetSerialConnection(self): self.Flush()", "bytes(\"OK\"+ISPChip.kNewLine, encoding=\"utf-8\") ReturnCodes = NXPReturnCodes CRCLocation = 0x000002fc CRCValues = { \"NO_ISP\": 0x4e697370,", "self.RamAddressLegal(address + length) and length <= self.RAMRange[1] - self.RAMRange[0] and address%self.kWordSize == 0", "+ sector_count)): print(\"\\nWriting Sector %d\"%sector) data_chunk = image[(sector-start_sector) * sector_bytes : (sector -", "when enabled ''' if on: command = \"A 1\" else: command = \"A", "True break except TimeoutError: pass if not verified: raise UserWarning(\"Verification Failure\") def CheckFlashWrite(self,", "bootable self.Unlock() self.WriteSector(0, bytes([0xde]*sector_bytes)) with open(image_file, 'rb') as f: prog = f.read() #image", "of different types\") return data == data_read def WriteFlashSector(self, sector: int, data: bytes):", "crc_calc def GetCheckSumedVectorTable(vector_table_loc: int, orig_image: bytes) -> bytes: # make this a valid", "7 #0x0000001c def FlashAddressLegal(self, address): return address >= self.FlashRange[0] and address <= self.FlashRange[1];", "\"Not Found\" def RaiseReturnCodeError(code: int, call_name: str) -> None: if int(code) != 
NXPReturnCodes[\"CMD_SUCCESS\"]:", "from timeout_decorator.timeout_decorator import TimeoutError from pycrc.algorithms import Crc from .ISPChip import ISPChip NXPReturnCodes", "length <= self.FlashRange[1] - self.FlashRange[0] and address%self.kPageSizeBytes == 0 def RamAddressLegal(self, address): return", "self.Write(bytes(command_string + self.kNewLine, encoding=\"utf-8\")) return self.GetReturnCode() def Unlock(self): ''' Enables Flash Write, Erase,", "filled_data = FillDataToFitSector(data, sector_bytes) self.WriteFlashSector(sector, filled_data) sleep(self.kSleepTime) #assert self.ReadSector(sector) == data_chunk def WriteBinaryToFlash(self,", "blank_sector = self.FindFirstBlankSector() with open(image_file, 'wb') as f: for sector in range(blank_sector): print(\"Sector", "Check to see if sector is already equal to RAM, if so skip", "== item[1]: return item[0] return \"Not Found\" def RaiseReturnCodeError(code: int, call_name: str) ->", "FlashAddressLegal(self, address): return address >= self.FlashRange[0] and address <= self.FlashRange[1]; def FlashRangeLegal(self, address,", "response code ''' def WriteCommand(self, command_string: str) -> int: self.Write(bytes(command_string + self.kNewLine, encoding=\"utf-8\"))", "self.Write(bytes('?'*15, encoding=\"utf-8\")) #self.Write('?' 
+ self.kNewLine) try: frame_in = self.ReadLine() if self.SyncString.strip() in frame_in.strip():", "as f: for sector in range(blank_sector): print(\"Sector \", sector) f.write(self.ReadSector(sector)) def MassErase(self): last_sector", "= [] while self.data_buffer_in: ch = self.data_buffer_in.popleft() data.append(ch) if len(data) != num_bytes: print(data,", "= int(response[0]) if response_code not in (NXPReturnCodes[\"CMD_SUCCESS\"], NXPReturnCodes[\"COMPARE_ERROR\"]): RaiseReturnCodeError(response_code, \"Compare\") return response_code ==", "end)) try: self.ReadLine() response = self.ReadLine().strip() print(\"Check Sectors Blank response\", response) except TimeoutError:", "is 1 or 2 ''' response_code = self.WriteCommand(\"B {} {}\".format(baud_rate, stop_bits)) RaiseReturnCodeError(response_code, \"Set", "= [ self.ReadLine().strip(), self.ReadLine().strip(), self.ReadLine().strip(), self.ReadLine().strip()] return \" \".join([\"0x%08x\"%int(uid) for uid in uuids])", ": 0x1c, \"NO_VALID_IMAGE\" : 0x1d, \"FAIM_NO_POWER\" : 0x1e, \"FAIM_NO_CLOCK\" : 0x1f, \"NoStatusResponse\" :", "ram_crc: print(\"CRC Check failed\", data_crc, ram_crc) else: break # Check to see if", "0 def RamAddressLegal(self, address): return address >= self.RAMRange[0] and address <= self.RAMRange[1] def", "set to %d\"%self.baud_rate) except Exception as e: print(e, type(e)) raise def SyncConnection(self): synced", "bytes: sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize == 0 return self.ReadMemory(sector*sector_bytes, sector_bytes) def ReadImage(self,", "ram_address: int, num_bytes: int): assert self.RamRangeLegal(ram_address, num_bytes) assert self.FlashRangeLegal(flash_address, num_bytes) response_code = self.WriteCommand(\"C", "False self.ClearSerialConnection() self.Flush() for i in range(5): self.Write(bytes('?'*15, encoding=\"utf-8\")) #self.Write('?' + self.kNewLine) try:", "reason. 
Also the boot version seems to be Minor then Major not like", "TimeoutError: pass uid = self.ReadUID() print(\"Part UID: %s\"%uid) boot_code_version = self.ReadBootCodeVersion() print(\"Boot Code", "0x6, \"INVALID_SECTOR/INVALID_PAGE\" : 0x7, \"SECTOR_NOT_BLANK\" : 0x8, \"SECTOR_NOT_PREPARED_FOR_WRITE_OPERATION\" : 0x9, \"COMPARE_ERROR\" : 0xa,", "item[1]: return item[0] return \"Not Found\" def RaiseReturnCodeError(code: int, call_name: str) -> None:", "== data_read def WriteFlashSector(self, sector: int, data: bytes): ram_address = self.RAMStartWrite sector_size_bytes =", "bytes: # make this a valid image by inserting a checksum in the", "return the response code ''' def WriteCommand(self, command_string: str) -> int: self.Write(bytes(command_string +", "if response_code not in (NXPReturnCodes[\"CMD_SUCCESS\"], NXPReturnCodes[\"COMPARE_ERROR\"]): RaiseReturnCodeError(response_code, \"Compare\") return response_code == NXPReturnCodes[\"CMD_SUCCESS\"] def", "open(image_file, 'wb') as f: for sector in range(blank_sector): print(\"Sector \", sector) f.write(self.ReadSector(sector)) def", "= self.WriteCommand(\"N\") RaiseReturnCodeError(response_code, \"Read UID\") uuids = [ self.ReadLine().strip(), self.ReadLine().strip(), self.ReadLine().strip(), self.ReadLine().strip()] return", "num_bytes=len(data)) if data_crc != ram_crc: print(\"CRC Check failed\", data_crc, ram_crc) else: break #", "data == data_read def WriteFlashSector(self, sector: int, data: bytes): ram_address = self.RAMStartWrite sector_size_bytes", "code ''' def WriteCommand(self, command_string: str) -> int: self.Write(bytes(command_string + self.kNewLine, encoding=\"utf-8\")) return", "= self.ReadLine() response_code = int(response[0]) if response_code not in (NXPReturnCodes[\"CMD_SUCCESS\"], NXPReturnCodes[\"COMPARE_ERROR\"]): RaiseReturnCodeError(response_code, \"Compare\")", "return address >= self.FlashRange[0] and address <= self.FlashRange[1]; def FlashRangeLegal(self, address, length): 
print(self.FlashRange,", "#when transfer is complete the handler sends OK<CR><LF> response_code = self.WriteCommand(\"W %d %d\"%(start,", "memory map are equal ''' def MemoryLocationsEqual(self, address1: int, address2: int, num_bytes: int):", "UserWarning(\"Verification Failure\") def CheckFlashWrite(self, data, flash_address: int) -> bool: ''' Read Memory and", "self.Flush() self.Write(bytes(self.kNewLine, encoding=\"utf-8\")) try: self.ReadLine() except TimeoutError: pass def InitConnection(self): self.ResetSerialConnection() try: try:", "vector_table_size = 8 kuint32_t_size = 4 # Make byte array into list of", "Command success is sent at the end of the transferr data = []", "response_code = self.WriteCommand(\"S %d %d\"%(address, num_bytes)) except TimeoutError: response_code = self.WriteCommand(\"S %d %d\"%(address,", "self.kNewLine, encoding=\"utf-8\")) return self.GetReturnCode() def Unlock(self): ''' Enables Flash Write, Erase, & Go", "if len(data) != len(data_read): raise ValueError(\"Read Memory received incorrect amount of data\") if", ": bytes) -> None: #print(out) assert(type(string) is bytes) self.WriteSerial(string) #self.WriteSerial(bytes(self.kNewLine, encoding = \"utf-8\"))", "def CopyRAMToFlash(self, flash_address: int, ram_address: int, num_bytes: int): assert self.RamRangeLegal(ram_address, num_bytes) assert self.FlashRangeLegal(flash_address,", "= struct.unpack(\"<%dI\"%vector_table_size, orig_image[:vector_table_size * kuint32_t_size]) # calculate the checksum over the interrupt vectors", "self.RAMRange[0] and address <= self.RAMRange[1] def RamRangeLegal(self, address, length): return self.RamAddressLegal(address) and self.RamAddressLegal(address", "0 assert self.RamRangeLegal(start, num_bytes) print(\"ReadMemory\") #self.Flush() #self.Read() #self.ClearBuffer() #self.Flush() print(\"R %d %d\"%(start, num_bytes))", "the end of the transferr data = [] while self.data_buffer_in: ch = self.data_buffer_in.popleft()", "int, address2: int, 
num_bytes: int): self.Write(bytes((\"M %d %d %d\"%(address1, address2, num_bytes) + self.kNewLine),", "print(\"Flash Signature: %s\"%chip_flash_sig) print(\"Programming Complete.\") def FindFirstBlankSector(self) -> int: for sector in range(self.SectorCount):", "in intvecs_list: vector_table_bytes += struct.pack(\"<I\", vecval) return vector_table_bytes def MakeBootable(vector_table_loc: int, orig_image: bytes)", "TypeError(\"data written and data read are of different types\") return data == data_read", "sleep(self.kSleepTime) assert self.CheckSectorsBlank(sector, sector) sleep(self.kSleepTime) print(\"Prep Sector\") sector_blank = self.CheckSectorsBlank(sector, sector) assert sector_blank", "as f: prog = f.read() #image = RemoveBootableCheckSum(self.kCheckSumLocation, prog) image = MakeBootable(self.kCheckSumLocation, prog)", "Sector %d\"%sector) data_chunk = image[sector * sector_bytes : (sector + 1) * sector_bytes]", "\"DST_ADDR_ERROR\" : 0x3, \"SRC_ADDR_NOT_MAPPED\" : 0x4, \"DST_ADDR_NOT_MAPPED\" : 0x5, \"COUNT_ERROR\" : 0x6, \"INVALID_SECTOR/INVALID_PAGE\"", "self.FlashRange[1]; def FlashRangeLegal(self, address, length): print(self.FlashRange, address, length) return self.FlashAddressLegal(address) and self.FlashAddressLegal(address +", "self.kNewLine) try: frame_in = self.ReadLine() if self.SyncString.strip() in frame_in.strip(): synced = True break", "address, length) return self.FlashAddressLegal(address) and self.FlashAddressLegal(address + length - 1) and length <=", "encoding=\"utf-8\"))) verified = False for i in range(3): try: frame_in = self.ReadLine()#Should be", "\"A 0\" response_code = self.WriteCommand(command) RaiseReturnCodeError(response_code, \"Set Echo\") def WriteToRam(self, start: int, data:", "if response_code not in (NXPReturnCodes[\"CMD_SUCCESS\"], NXPReturnCodes[\"SECTOR_NOT_BLANK\"]): RaiseReturnCodeError(response_code, \"Blank Check Sectors\") return response_code ==", "sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert 
sector_bytes%self.kWordSize == 0 #make not bootable self.Unlock() self.WriteSector(0, bytes([0xde]*sector_bytes))", "while len(self.data_buffer_in) < (num_bytes): self.Read() # Command success is sent at the end", "%d\\nFlash Address: %x\\nRAM Address: %x\\n\"%(sector, flash_address, ram_address)) assert len(data) == sector_size_bytes #data +=", "interrupt vectors intvecs_list = list(intvecs[:vector_table_size]) intvecs_list[vector_table_loc] = 0 # clear csum value csum", "sector: int, data: bytes): #assert data sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert(len(data) > 0) filled_data", "boot_code_version = self.ReadBootCodeVersion() print(\"Boot Code Version: %s\"%boot_code_version) self.SetBaudRate(self.baud_rate) print(\"Baudrate set to %d\"%self.baud_rate) except", "_ in range(2): try: self.ReadLine() except TimeoutError: pass def SetCrystalFrequency(self, frequency_khz: int): self.Write((bytes(\"%d\"%frequency_khz", "and address%self.kPageSizeBytes == 0 def RamAddressLegal(self, address): return address >= self.RAMRange[0] and address", "and self.FlashAddressLegal(end)) response_code = self.WriteCommand(\"Z %d %d %d %d\"%(start, end, wait_states, mode)) RaiseReturnCodeError(response_code,", "''' response_code = self.WriteCommand(\"K\") RaiseReturnCodeError(response_code, \"Read Bootcode Version\") minor = self.ReadLine().strip() major =", "is bytes) self.WriteSerial(string) #self.WriteSerial(bytes(self.kNewLine, encoding = \"utf-8\")) ''' Takes the command string, return", "return sig def ReadWriteFAIM(self): response_code = self.WriteCommand(\"O\") RaiseReturnCodeError(response_code, \"Read Write FAIM\") def ResetSerialConnection(self):", "sig = [] for i in range(4): sig.append(self.ReadLine().strip()) return sig def ReadWriteFAIM(self): response_code", "bytes): ram_address = self.RAMStartWrite sector_size_bytes = self.kPageSizeBytes*self.SectorSizePages flash_address = self.FlashRange[0] + sector*sector_size_bytes print(\"\\nWriting", 
"Failure\") def CheckFlashWrite(self, data, flash_address: int) -> bool: ''' Read Memory and compare", "{}\".format(call_name, GetErrorCodeName(code), code)) def RemoveBootableCheckSum(vector_table_loc: int, image: bytes): kuint32_t_size = 4 MakeBootable(vector_table_loc, image)", "print(self.ReadLine()) except TimeoutError: return @timeout(4) def ReadMemory(self, start: int, num_bytes: int): assert num_bytes%self.kWordSize", "= self.WriteCommand(\"E %d %d\"%(start, end)) RaiseReturnCodeError(response_code, \"Erase Sectors\") def ErasePages(self, start: int, end:", "self.ReadUID() print(\"Part UID: %s\"%uid) boot_code_version = self.ReadBootCodeVersion() print(\"Boot Code Version: %s\"%boot_code_version) self.SetBaudRate(self.baud_rate) print(\"Baudrate", "-> bytes: # make this a valid image by inserting a checksum in", "= self.ReadLine() return int(resp) def ReadBootCodeVersion(self): ''' LPC84x sends a 0x1a first for", "= zlib.crc32(data, 0) try: ram_crc = self.ReadCRC(ram_address, num_bytes=len(data)) except Exception: ram_crc = self.ReadCRC(ram_address,", "= crc.bit_by_bit(frame) return crc_calc def GetCheckSumedVectorTable(vector_table_loc: int, orig_image: bytes) -> bytes: # make", "len(data))) return data class NXPChip(ISPChip): kWordSize = 4 kPageSizeBytes = 64 SectorSizePages =", "array into list of little endian 32 bit words intvecs = struct.unpack(\"<%dI\"%vector_table_size, orig_image[:vector_table_size", "= \"A 1\" else: command = \"A 0\" response_code = self.WriteCommand(command) RaiseReturnCodeError(response_code, \"Set", "if self.SyncVerified.strip() in frame_in: verified = True break except TimeoutError: pass if not", "== 30MHz self.SectorCount = 0 self.RAMSize = 0 self.RAMRange = [0, 0] self.FlashRange", "image by inserting a checksum in the correct place vector_table_size = 8 kuint32_t_size", "= f.read() image = prog print(\"Program Length:\", len(prog)) sector_count = int(math.ceil(len(prog)/sector_bytes)) assert start_sector", "ReadMemory(self, 
start: int, num_bytes: int): assert num_bytes%self.kWordSize == 0 assert self.RamRangeLegal(start, num_bytes) print(\"ReadMemory\")", "MakeBootable(vector_table_loc, image) image_list = list(image) for byte in range(kuint32_t_size): image_list[vector_table_loc * kuint32_t_size +", "= self.WriteCommand(\"Z %d %d %d %d\"%(start, end, wait_states, mode)) RaiseReturnCodeError(response_code, \"Read Flash Signature\")", "self.WriteCommand(\"C %d %d %d\"%(flash_address, ram_address, num_bytes)) RaiseReturnCodeError(response_code, \"Copy RAM To Flash\") #sleep(.2) def", "compare it to what was written ''' data_read = self.ReadMemory(flash_address, len(data)) if len(data)", "assert self.MemoryLocationsEqual(flash_address, ram_address, sector_size_bytes) def WriteSector(self, sector: int, data: bytes): #assert data sector_bytes", "data: bytes): ram_address = self.RAMStartWrite sector_size_bytes = self.kPageSizeBytes*self.SectorSizePages flash_address = self.FlashRange[0] + sector*sector_size_bytes", "except TimeoutError: pass def SetCrystalFrequency(self, frequency_khz: int): self.Write((bytes(\"%d\"%frequency_khz + self.kNewLine, encoding=\"utf-8\"))) verified =", "over the interrupt vectors intvecs_list = list(intvecs[:vector_table_size]) intvecs_list[vector_table_loc] = 0 # clear csum", "Sector %d\"%sector) data_chunk = image[(sector-start_sector) * sector_bytes : (sector - start_sector + 1)", ": 0xff, } def GetErrorCodeName(code: int) -> str: code = int(code) for item", "command_string: str) -> int: self.Write(bytes(command_string + self.kNewLine, encoding=\"utf-8\")) return self.GetReturnCode() def Unlock(self): '''", "0x4, \"DST_ADDR_NOT_MAPPED\" : 0x5, \"COUNT_ERROR\" : 0x6, \"INVALID_SECTOR/INVALID_PAGE\" : 0x7, \"SECTOR_NOT_BLANK\" : 0x8,", "%d %d %d\"%(start, end, wait_states, mode)) RaiseReturnCodeError(response_code, \"Read Flash Signature\") sig = []", "start_sector + sector_count <= self.SectorCount self.Unlock() for sector in reversed(range(start_sector, 
start_sector + sector_count)):", "len(data)) if len(data) != len(data_read): raise ValueError(\"Read Memory received incorrect amount of data\")", "self.ReadLine().strip() return \"%d.%d\"%(int(major), int(minor)) ''' Checks to see if two sections in the", "import TimeoutError from pycrc.algorithms import Crc from .ISPChip import ISPChip NXPReturnCodes = {", "''' Get a return code with no response ''' code = self.GetReturnCode() RaiseReturnCodeError(code,", "not in (NXPReturnCodes[\"CMD_SUCCESS\"], NXPReturnCodes[\"COMPARE_ERROR\"]): RaiseReturnCodeError(response_code, \"Compare\") return response_code == NXPReturnCodes[\"CMD_SUCCESS\"] def ReadUID(self): response_code", "0x43218765, } kSleepTime = 1 def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.CrystalFrequency =", "start: int, end: int): try: response_code = self.WriteCommand(\"P %d %d\"%(start, end)) except Exception:", "response_code = self.WriteCommand(\"C %d %d %d\"%(flash_address, ram_address, num_bytes)) RaiseReturnCodeError(response_code, \"Copy RAM To Flash\")", "kWordSize)) # self.AssertReturnCode(\"Write to RAM\")#get confirmation # self.Write(data[i:i+kWordSize])#Stream data after confirmation # i+=kWordSize", "RemoveBootableCheckSum(vector_table_loc: int, image: bytes): kuint32_t_size = 4 MakeBootable(vector_table_loc, image) image_list = list(image) for", "self.Unlock() for sector in reversed(range(start_sector, start_sector + sector_count)): print(\"\\nWriting Sector %d\"%sector) data_chunk =", "reversed(range(sector_count)): print(\"\\nWriting Sector %d\"%sector) data_chunk = image[sector * sector_bytes : (sector + 1)", "RaiseReturnCodeError(response_code, \"Read Bootcode Version\") minor = self.ReadLine().strip() major = self.ReadLine().strip() return \"%d.%d\"%(int(major), int(minor))", "\"CODE_READ_PROTECTION_ENABLED\" : 0x13, \"Unused 1\" : 0x14, \"USER_CODE_CHECKSUM\" : 0x15, \"Unused 2\" :", "int, num_bytes: int) -> int: try: response_code = self.WriteCommand(\"S 
%d %d\"%(address, num_bytes)) except", "int, end: int): response_code = self.WriteCommand(\"E %d %d\"%(start, end)) RaiseReturnCodeError(response_code, \"Erase Sectors\") def", "range(blank_sector): print(\"Sector \", sector) f.write(self.ReadSector(sector)) def MassErase(self): last_sector = self.SectorCount - 1 sleep(1)", "self.WriteCommand(\"R %d %d\"%(start, num_bytes)) RaiseReturnCodeError(response_code, \"Read Memory\") while len(self.data_buffer_in) < (num_bytes): self.Read() #", "str): sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize == 0 #make not bootable self.Unlock() self.WriteSector(0,", "with open(image_file, 'rb') as f: prog = f.read() #image = RemoveBootableCheckSum(self.kCheckSumLocation, prog) image", "self.ReadCRC(ram_address, num_bytes=len(data)) if data_crc != ram_crc: print(\"CRC Check failed\", data_crc, ram_crc) else: break", "= self.ReadLine()#discard echo except TimeoutError: pass verified = False for i in range(3):", "= 0 return bytes(image_list) # 2s compliment of checksum def CalculateCheckSum(frame) -> int:", "from pycrc.algorithms import Crc from .ISPChip import ISPChip NXPReturnCodes = { \"CMD_SUCCESS\" :", "self.data_buffer_in.popleft() data.append(ch) if len(data) != num_bytes: print(data, len(data), num_bytes) assert len(data) == num_bytes", "while self.data_buffer_in: ch = self.data_buffer_in.popleft() data.append(ch) if len(data) != num_bytes: print(data, len(data), num_bytes)", "return self.SectorCount - 1 def ReadSector(self, sector: int) -> bytes: sector_bytes = self.SectorSizePages*self.kPageSizeBytes", "sig.append(self.ReadLine().strip()) return sig def ReadWriteFAIM(self): response_code = self.WriteCommand(\"O\") RaiseReturnCodeError(response_code, \"Read Write FAIM\") def", "assert self.CheckSectorsBlank(sector, sector) sleep(self.kSleepTime) print(\"Prep Sector\") sector_blank = self.CheckSectorsBlank(sector, sector) assert sector_blank sleep(self.kSleepTime)", "def 
GetCheckSumedVectorTable(vector_table_loc: int, orig_image: bytes) -> bytes: # make this a valid image", "bool = False): ''' Start executing code at the specified spot ''' mode", "response_code = self.WriteCommand(\"K\") RaiseReturnCodeError(response_code, \"Read Bootcode Version\") minor = self.ReadLine().strip() major = self.ReadLine().strip()", "entry return (1<<32) - (csum % (1<<32)) def Crc32(frame) -> int: #CRC32 polynomial", "for sector in range(self.SectorCount): if self.CheckSectorsBlank(sector, self.SectorCount - 1): return sector return self.SectorCount", "int(minor)) ''' Checks to see if two sections in the memory map are", "%d %d %d\"%(flash_address, ram_address, num_bytes)) RaiseReturnCodeError(response_code, \"Copy RAM To Flash\") #sleep(.2) def Go(self,", "0 def GetReturnCode(self) -> int: for _ in range(10): #sleep(.1) try: resp =", "self.SectorCount for sector in reversed(range(sector_count)): print(\"\\nWriting Sector %d\"%sector) data_chunk = image[sector * sector_bytes", "+ self.kNewLine) try: frame_in = self.ReadLine() if self.SyncString.strip() in frame_in.strip(): synced = True", "GetReturnCode(self) -> int: for _ in range(10): #sleep(.1) try: resp = self.ReadLine().strip() return", "= 8 #StopBits = 1 SyncString = \"Synchronized\"+ISPChip.kNewLine SyncStringBytes = bytes(SyncString, encoding=\"utf-8\") SyncVerified", "response_code = self.WriteCommand(\"I %d %d\"%(start, end)) try: self.ReadLine() response = self.ReadLine().strip() print(\"Check Sectors", "self.MemoryLocationsEqual(flash_address, ram_address, sector_size_bytes) print(\"Flash already equal to RAM, skipping write\") return except: pass", "\"Read Flash Signature\") sig = [] for i in range(4): sig.append(self.ReadLine().strip()) return sig", "sector_count <= self.SectorCount self.Unlock() for sector in reversed(range(start_sector, start_sector + sector_count)): print(\"\\nWriting Sector", "0x1c, \"NO_VALID_IMAGE\" : 0x1d, \"FAIM_NO_POWER\" : 0x1e, \"FAIM_NO_CLOCK\" : 0x1f, 
\"NoStatusResponse\" : 0xff,", "0xc, \"ADDR_ERROR\" : 0xd, \"ADDR_NOT_MAPPED\" : 0xe, \"CMD_LOCKED\" : 0xf, \"INVALID_CODE\" : 0x10,", "sleep(1) chip_flash_sig = self.ReadFlashSig(self.FlashRange[0], self.FlashRange[1]) print(\"Flash Signature: %s\"%chip_flash_sig) print(\"Programming Complete.\") def WriteImage(self, image_file:", "\"DST_ADDR_NOT_MAPPED\" : 0x5, \"COUNT_ERROR\" : 0x6, \"INVALID_SECTOR/INVALID_PAGE\" : 0x7, \"SECTOR_NOT_BLANK\" : 0x8, \"SECTOR_NOT_PREPARED_FOR_WRITE_OPERATION\"", "\"Erase Sectors\") def ErasePages(self, start: int, end: int): response_code = self.WriteCommand(\"X %d %d\"%(start,", ": 0x14, \"USER_CODE_CHECKSUM\" : 0x15, \"Unused 2\" : 0x16, \"EFRO_NO_POWER\" : 0x17, \"FLASH_NO_POWER\"", ": 0xc, \"ADDR_ERROR\" : 0xd, \"ADDR_NOT_MAPPED\" : 0xe, \"CMD_LOCKED\" : 0xf, \"INVALID_CODE\" :", "-> int: try: response_code = self.WriteCommand(\"S %d %d\"%(address, num_bytes)) except TimeoutError: response_code =", "\"CMD_LOCKED\" : 0xf, \"INVALID_CODE\" : 0x10, \"INVALID_BAUD_RATE\" : 0x11, \"INVALID_STOP_BIT\" : 0x12, \"CODE_READ_PROTECTION_ENABLED\"", "+ self.kNewLine), encoding=\"utf-8\")) response = self.ReadLine() response_code = int(response[0]) if response_code not in", "FindFirstBlankSector(self) -> int: for sector in range(self.SectorCount): if self.CheckSectorsBlank(sector, self.SectorCount - 1): return", "CalculateCheckSum(intvecs_list) intvecs_list[vector_table_loc] = csum vector_table_bytes = b'' for vecval in intvecs_list: vector_table_bytes +=", "if thumb_mode: mode = 'T' response_code = self.WriteCommand(\"G %d %s\"%(address, mode)) RaiseReturnCodeError(response_code, \"Go\")", "and data read are of different types\") return data == data_read def WriteFlashSector(self,", "= self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize == 0 #make not bootable self.Unlock() self.WriteSector(0, bytes([0xde]*sector_bytes)) with", "%d %d\"%(start, end)) except Exception: response_code = self.WriteCommand(\"P %d 
%d\"%(start, end)) RaiseReturnCodeError(response_code, \"Prep", "isinstance(type(data), data_read): raise TypeError(\"data written and data read are of different types\") return", "from timeout_decorator import timeout from timeout_decorator.timeout_decorator import TimeoutError from pycrc.algorithms import Crc from", "\"NoStatusResponse\" : 0xff, } def GetErrorCodeName(code: int) -> str: code = int(code) for", "return int(self.ReadLine().strip()) def ReadFlashSig(self, start: int, end: int, wait_states: int = 2, mode:", "string : bytes) -> None: #print(out) assert(type(string) is bytes) self.WriteSerial(string) #self.WriteSerial(bytes(self.kNewLine, encoding =", "kuint32_t_size]) # calculate the checksum over the interrupt vectors intvecs_list = list(intvecs[:vector_table_size]) intvecs_list[vector_table_loc]", "str): blank_sector = self.FindFirstBlankSector() with open(image_file, 'wb') as f: for sector in range(blank_sector):", "bytes): assert len(data)%self.kWordSize == 0 assert self.RamRangeLegal(start, len(data)) print(\"Write to RAM %d bytes\"%len(data))", "bytes) -> bytes: vector_table_bytes = GetCheckSumedVectorTable(vector_table_loc, orig_image) image = vector_table_bytes + orig_image[len(vector_table_bytes):] return", "num_bytes) assert self.FlashRangeLegal(flash_address, num_bytes) response_code = self.WriteCommand(\"C %d %d %d\"%(flash_address, ram_address, num_bytes)) RaiseReturnCodeError(response_code,", "UserWarning( \"Return Code Failure in {} {} {}\".format(call_name, GetErrorCodeName(code), code)) def RemoveBootableCheckSum(vector_table_loc: int,", "# self.Write(\"W %d %d\"%(start + i, kWordSize)) # self.AssertReturnCode(\"Write to RAM\")#get confirmation #", "open(image_file, 'rb') as f: prog = f.read() #image = RemoveBootableCheckSum(self.kCheckSumLocation, prog) image =", "0x1a first for some reason. 
Also the boot version seems to be Minor", "self.FindFirstBlankSector() with open(image_file, 'wb') as f: for sector in range(blank_sector): print(\"Sector \", sector)", "int = 1): ''' Baud Depends of FAIM config, stopbit is 1 or", "0 with open(image_file, 'rb') as f: prog = f.read() image = prog print(\"Program", "self.RamRangeLegal(start, len(data)) print(\"Write to RAM %d bytes\"%len(data)) #while i < len(data): # self.Write(\"W", "self.Read() # Command success is sent at the end of the transferr data", "Minor then Major not like the docs say ''' response_code = self.WriteCommand(\"K\") RaiseReturnCodeError(response_code,", "echos host when enabled ''' if on: command = \"A 1\" else: command", "16 MaxByteTransfer = 1024 StatusRespLength = len(ISPChip.kNewLine) + 1 #Parity = None #DataBits", "\"NO_VALID_IMAGE\" : 0x1d, \"FAIM_NO_POWER\" : 0x1e, \"FAIM_NO_CLOCK\" : 0x1f, \"NoStatusResponse\" : 0xff, }", "are of different types\") return data == data_read def WriteFlashSector(self, sector: int, data:", "int(code) != NXPReturnCodes[\"CMD_SUCCESS\"]: raise UserWarning( \"Return Code Failure in {} {} {}\".format(call_name, GetErrorCodeName(code),", "self.ReadLine().strip() return int(resp) except ValueError: pass return self.ReturnCodes[\"NoStatusResponse\"] def AssertReturnCode(self, call_name: str) ->", "def WriteFlashSector(self, sector: int, data: bytes): ram_address = self.RAMStartWrite sector_size_bytes = self.kPageSizeBytes*self.SectorSizePages flash_address", "command = \"A 1\" else: command = \"A 0\" response_code = self.WriteCommand(command) RaiseReturnCodeError(response_code,", "to Flash\") self.CopyRAMToFlash(flash_address, ram_address, sector_size_bytes) sleep(self.kSleepTime) flash_crc = self.ReadCRC(flash_address, num_bytes=len(data)) assert flash_crc ==", "(UserWarning, TimeoutError) as w: print(\"Sync Failed\", w) print(\"Connect to running ISP\") self.ClearSerialConnection() self.Echo(False)", ">= self.RAMRange[0] and address <= self.RAMRange[1] def 
RamRangeLegal(self, address, length): return self.RamAddressLegal(address) and", "- len(data)) data_crc = zlib.crc32(data, 0) try: ram_crc = self.ReadCRC(ram_address, num_bytes=len(data)) except Exception:", "TimeoutError: response_code = self.WriteCommand(\"S %d %d\"%(address, num_bytes)) RaiseReturnCodeError(response_code, \"Read CRC\") return int(self.ReadLine().strip()) def", "\"FAIM_NO_CLOCK\" : 0x1f, \"NoStatusResponse\" : 0xff, } def GetErrorCodeName(code: int) -> str: code", "print(\"ReadMemory\") #self.Flush() #self.Read() #self.ClearBuffer() #self.Flush() print(\"R %d %d\"%(start, num_bytes)) response_code = self.WriteCommand(\"R %d", "''' def WriteCommand(self, command_string: str) -> int: self.Write(bytes(command_string + self.kNewLine, encoding=\"utf-8\")) return self.GetReturnCode()", "for i in range(4): sig.append(self.ReadLine().strip()) return sig def ReadWriteFAIM(self): response_code = self.WriteCommand(\"O\") RaiseReturnCodeError(response_code,", "data_crc assert self.MemoryLocationsEqual(flash_address, ram_address, sector_size_bytes) def WriteSector(self, sector: int, data: bytes): #assert data", "= RemoveBootableCheckSum(self.kCheckSumLocation, prog) image = MakeBootable(self.kCheckSumLocation, prog) print(\"Program Length:\", len(prog)) sector_count = int(math.ceil(len(prog)/sector_bytes))", "so skip try: self.MemoryLocationsEqual(flash_address, ram_address, sector_size_bytes) print(\"Flash already equal to RAM, skipping write\")", "image[(sector-start_sector) * sector_bytes : (sector - start_sector + 1) * sector_bytes] self.WriteSector(sector, data_chunk)", "self.WriteSector(sector, data_chunk) chip_flash_sig = self.ReadFlashSig(self.FlashRange[0], self.FlashRange[1]) print(\"Flash Signature: %s\"%chip_flash_sig) print(\"Programming Complete.\") def FindFirstBlankSector(self)", "is complete the handler sends OK<CR><LF> response_code = self.WriteCommand(\"W %d %d\"%(start, len(data))) RaiseReturnCodeError(response_code,", "after confirmation 
#self.Write(\"OK\"+self.kNewLine) try: print(self.ReadLine()) except TimeoutError: return @timeout(4) def ReadMemory(self, start: int,", "assert(type(string) is bytes) self.WriteSerial(string) #self.WriteSerial(bytes(self.kNewLine, encoding = \"utf-8\")) ''' Takes the command string,", "data after confirmation #self.Write(\"OK\"+self.kNewLine) try: print(self.ReadLine()) except TimeoutError: return @timeout(4) def ReadMemory(self, start:", "for sector in reversed(range(start_sector, start_sector + sector_count)): print(\"\\nWriting Sector %d\"%sector) data_chunk = image[(sector-start_sector)", "0x14, \"USER_CODE_CHECKSUM\" : 0x15, \"Unused 2\" : 0x16, \"EFRO_NO_POWER\" : 0x17, \"FLASH_NO_POWER\" :", "if int(code) != NXPReturnCodes[\"CMD_SUCCESS\"]: raise UserWarning( \"Return Code Failure in {} {} {}\".format(call_name,", "self.Write(data[i:i+kWordSize])#Stream data after confirmation # i+=kWordSize #when transfer is complete the handler sends", "data_read): raise TypeError(\"data written and data read are of different types\") return data", "+ 1) * sector_bytes] self.WriteSector(sector, data_chunk) chip_flash_sig = self.ReadFlashSig(self.FlashRange[0], self.FlashRange[1]) print(\"Flash Signature: %s\"%chip_flash_sig)", "== 0 def RamAddressLegal(self, address): return address >= self.RAMRange[0] and address <= self.RAMRange[1]", "MakeBootable(self.kCheckSumLocation, prog) print(\"Program Length:\", len(prog)) sector_count = int(math.ceil(len(prog)/sector_bytes)) assert sector_count <= self.SectorCount for", "self.FlashAddressLegal(address + length - 1) and length <= self.FlashRange[1] - self.FlashRange[0] and address%self.kPageSizeBytes", "def WriteBinaryToFlash(self, image_file: str, start_sector: int): sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize == 0", "sleep(self.kSleepTime) flash_crc = self.ReadCRC(flash_address, num_bytes=len(data)) assert flash_crc == data_crc assert self.MemoryLocationsEqual(flash_address, 
ram_address, sector_size_bytes)", "& Go ''' self.ClearBuffer() response_code = self.WriteCommand(\"U 23130\") RaiseReturnCodeError(response_code, \"Unlock\") def SetBaudRate(self, baud_rate:", "+= entry return (1<<32) - (csum % (1<<32)) def Crc32(frame) -> int: #CRC32", "PrepSectorsForWrite(self, start: int, end: int): try: response_code = self.WriteCommand(\"P %d %d\"%(start, end)) except", "return self.RamAddressLegal(address) and self.RamAddressLegal(address + length) and length <= self.RAMRange[1] - self.RAMRange[0] and", "''' data_read = self.ReadMemory(flash_address, len(data)) if len(data) != len(data_read): raise ValueError(\"Read Memory received", "+ i, kWordSize)) # self.AssertReturnCode(\"Write to RAM\")#get confirmation # self.Write(data[i:i+kWordSize])#Stream data after confirmation", "pycrc.algorithms import Crc from .ISPChip import ISPChip NXPReturnCodes = { \"CMD_SUCCESS\" : 0x0,", "TimeoutError: pass def SetCrystalFrequency(self, frequency_khz: int): self.Write((bytes(\"%d\"%frequency_khz + self.kNewLine, encoding=\"utf-8\"))) verified = False", "response_code = int(response[0]) if response_code not in (NXPReturnCodes[\"CMD_SUCCESS\"], NXPReturnCodes[\"COMPARE_ERROR\"]): RaiseReturnCodeError(response_code, \"Compare\") return response_code", "except TimeoutError: pass if not verified: raise UserWarning(\"Verification Failure\") def CheckFlashWrite(self, data, flash_address:", "-> int: #CRC32 polynomial = 0x104c11db6 crc = Crc(width=32, poly=polynomial, reflect_in=True, xor_in=(1<<32)-1, reflect_out=True,", "self.FlashRange[1] - self.FlashRange[0] and address%self.kPageSizeBytes == 0 def RamAddressLegal(self, address): return address >=", "self.ReadCRC(ram_address, num_bytes=len(data)) while ram_crc != data_crc: sleep(self.kSleepTime) self.WriteToRam(ram_address, data) sleep(self.kSleepTime) ram_crc = self.ReadCRC(ram_address,", "self.WriteFlashSector(sector, filled_data) sleep(self.kSleepTime) #assert self.ReadSector(sector) == data_chunk def 
WriteBinaryToFlash(self, image_file: str, start_sector: int):", "csum += entry return (1<<32) - (csum % (1<<32)) def Crc32(frame) -> int:", "try: response_code = self.WriteCommand(\"P %d %d\"%(start, end)) except Exception: response_code = self.WriteCommand(\"P %d", "\"\" if thumb_mode: mode = 'T' response_code = self.WriteCommand(\"G %d %s\"%(address, mode)) RaiseReturnCodeError(response_code,", "pass if not verified: raise UserWarning(\"Verification Failure\") def CheckFlashWrite(self, data, flash_address: int) ->", "= 12000#khz == 30MHz self.SectorCount = 0 self.RAMSize = 0 self.RAMRange = [0,", "Sector\") sector_blank = self.CheckSectorsBlank(sector, sector) assert sector_blank sleep(self.kSleepTime) self.PrepSectorsForWrite(sector, sector) sleep(self.kSleepTime) print(\"Write to", "to RAM\") self.Write(data)#Stream data after confirmation #self.Write(\"OK\"+self.kNewLine) try: print(self.ReadLine()) except TimeoutError: return @timeout(4)", "== NXPReturnCodes[\"CMD_SUCCESS\"] def ReadUID(self): response_code = self.WriteCommand(\"N\") RaiseReturnCodeError(response_code, \"Read UID\") uuids = [", "#self.Flush() print(\"R %d %d\"%(start, num_bytes)) response_code = self.WriteCommand(\"R %d %d\"%(start, num_bytes)) RaiseReturnCodeError(response_code, \"Read", "self.WriteSerial(string) #self.WriteSerial(bytes(self.kNewLine, encoding = \"utf-8\")) ''' Takes the command string, return the response", "str) -> int: self.Write(bytes(command_string + self.kNewLine, encoding=\"utf-8\")) return self.GetReturnCode() def Unlock(self): ''' Enables", "= self.ReadFlashSig(self.FlashRange[0], self.FlashRange[1]) print(\"Flash Signature: %s\"%chip_flash_sig) print(\"Programming Complete.\") def WriteImage(self, image_file: str): sector_bytes", "crc_calc = crc.bit_by_bit(frame) return crc_calc def GetCheckSumedVectorTable(vector_table_loc: int, orig_image: bytes) -> bytes: #", "types\") return data == data_read def WriteFlashSector(self, sector: int, data: bytes): ram_address =", 
"\"NO_ISP\": 0x4e697370, \"CRP1\" : 0x12345678, \"CRP2\" : 0x87654321, \"CRP3\" : 0x43218765, } kSleepTime", "OK\\r\\n if self.SyncVerified.strip() in frame_in: verified = True break except TimeoutError: pass if", "def ReadFlashSig(self, start: int, end: int, wait_states: int = 2, mode: int =", "%d %d\"%(start, num_bytes)) response_code = self.WriteCommand(\"R %d %d\"%(start, num_bytes)) RaiseReturnCodeError(response_code, \"Read Memory\") while", "encoding=\"utf-8\")) try: self.ReadLine() except TimeoutError: pass def InitConnection(self): self.ResetSerialConnection() try: try: self.SyncConnection() self.SetCrystalFrequency(self.CrystalFrequency)", "encoding=\"utf-8\")) response = self.ReadLine() response_code = int(response[0]) if response_code not in (NXPReturnCodes[\"CMD_SUCCESS\"], NXPReturnCodes[\"COMPARE_ERROR\"]):", "print(\"Prep Sector\") self.PrepSectorsForWrite(sector, sector) sleep(self.kSleepTime) print(\"Erase Sector\") self.EraseSector(sector, sector) sleep(self.kSleepTime) assert self.CheckSectorsBlank(sector, sector)", "self.WriteCommand(\"S %d %d\"%(address, num_bytes)) except TimeoutError: response_code = self.WriteCommand(\"S %d %d\"%(address, num_bytes)) RaiseReturnCodeError(response_code,", "poly=polynomial, reflect_in=True, xor_in=(1<<32)-1, reflect_out=True, xor_out=0x00) crc_calc = crc.bit_by_bit(frame) return crc_calc def GetCheckSumedVectorTable(vector_table_loc: int,", "pass return self.ReturnCodes[\"NoStatusResponse\"] def AssertReturnCode(self, call_name: str) -> None: ''' Get a return", "# make this a valid image by inserting a checksum in the correct", "%d %d %d %d\"%(start, end, wait_states, mode)) RaiseReturnCodeError(response_code, \"Read Flash Signature\") sig =", "no response ''' code = self.GetReturnCode() RaiseReturnCodeError(code, call_name) def Write(self, string : bytes)", "- len(data))) return data class NXPChip(ISPChip): kWordSize = 4 kPageSizeBytes = 64 SectorSizePages", "int): assert num_bytes%self.kWordSize == 0 assert 
self.RamRangeLegal(start, num_bytes) print(\"ReadMemory\") #self.Flush() #self.Read() #self.ClearBuffer() #self.Flush()", "of checksum def CalculateCheckSum(frame) -> int: csum = 0 for entry in frame:", "= image[sector * sector_bytes : (sector + 1) * sector_bytes] self.WriteSector(sector, data_chunk) chip_flash_sig", "vector_table_bytes += struct.pack(\"<I\", vecval) return vector_table_bytes def MakeBootable(vector_table_loc: int, orig_image: bytes) -> bytes:", "int: csum = 0 for entry in frame: csum += entry return (1<<32)", "call_name: str) -> None: ''' Get a return code with no response '''", "self.ReadBootCodeVersion() print(\"Boot Code Version: %s\"%boot_code_version) self.SetBaudRate(self.baud_rate) print(\"Baudrate set to %d\"%self.baud_rate) except Exception as", "def ClearSerialConnection(self): self.Write(bytes(self.kNewLine, encoding=\"utf-8\")) self.ClearBuffer() self.Flush() self.Read() self.ClearBuffer() self.Flush() for _ in range(2):", "2, mode: int = 0) -> str: assert start < end assert(self.FlashAddressLegal(start) and", "sector) sleep(self.kSleepTime) assert self.CheckSectorsBlank(sector, sector) sleep(self.kSleepTime) print(\"Prep Sector\") sector_blank = self.CheckSectorsBlank(sector, sector) assert", "self.RamRangeLegal(ram_address, num_bytes) assert self.FlashRangeLegal(flash_address, num_bytes) response_code = self.WriteCommand(\"C %d %d %d\"%(flash_address, ram_address, num_bytes))", "def SyncConnection(self): synced = False self.ClearSerialConnection() self.Flush() for i in range(5): self.Write(bytes('?'*15, encoding=\"utf-8\"))", "assert self.RamRangeLegal(ram_address, num_bytes) assert self.FlashRangeLegal(flash_address, num_bytes) response_code = self.WriteCommand(\"C %d %d %d\"%(flash_address, ram_address,", "ISP\") self.ClearSerialConnection() self.Echo(False) try: self.ReadLine() self.Flush() self.ClearBuffer() except TimeoutError: pass uid = self.ReadUID()", "\"Read UID\") uuids = [ self.ReadLine().strip(), self.ReadLine().strip(), 
self.ReadLine().strip(), self.ReadLine().strip()] return \" \".join([\"0x%08x\"%int(uid) for", "response_code = self.WriteCommand(\"B {} {}\".format(baud_rate, stop_bits)) RaiseReturnCodeError(response_code, \"Set Baudrate\") def Echo(self, on: bool", "print(\"Programming Complete.\") def FindFirstBlankSector(self) -> int: for sector in range(self.SectorCount): if self.CheckSectorsBlank(sector, self.SectorCount", "%d\"%(address1, address2, num_bytes) + self.kNewLine), encoding=\"utf-8\")) response = self.ReadLine() response_code = int(response[0]) if", "return \"Not Found\" def RaiseReturnCodeError(code: int, call_name: str) -> None: if int(code) !=", "except TimeoutError: pass if response_code not in (NXPReturnCodes[\"CMD_SUCCESS\"], NXPReturnCodes[\"SECTOR_NOT_BLANK\"]): RaiseReturnCodeError(response_code, \"Blank Check Sectors\")", "RaiseReturnCodeError(response_code, \"Read Part ID\") resp = self.ReadLine() return int(resp) def ReadBootCodeVersion(self): ''' LPC84x", "(sector - start_sector + 1) * sector_bytes] self.WriteSector(sector, data_chunk) sleep(1) chip_flash_sig = self.ReadFlashSig(self.FlashRange[0],", "see if sector is already equal to RAM, if so skip try: self.MemoryLocationsEqual(flash_address,", "print(\"CRC Check failed\", data_crc, ram_crc) else: break # Check to see if sector", "sector) assert sector_blank sleep(self.kSleepTime) self.PrepSectorsForWrite(sector, sector) sleep(self.kSleepTime) print(\"Write to Flash\") self.CopyRAMToFlash(flash_address, ram_address, sector_size_bytes)", "in range(5): self.Write(bytes('?'*15, encoding=\"utf-8\")) #self.Write('?' 
+ self.kNewLine) try: frame_in = self.ReadLine() if self.SyncString.strip()", "(sector + 1) * sector_bytes] self.WriteSector(sector, data_chunk) chip_flash_sig = self.ReadFlashSig(self.FlashRange[0], self.FlashRange[1]) print(\"Flash Signature:", "Length:\", len(prog)) sector_count = int(math.ceil(len(prog)/sector_bytes)) assert start_sector + sector_count <= self.SectorCount self.Unlock() for", "chip_flash_sig = self.ReadFlashSig(self.FlashRange[0], self.FlashRange[1]) print(\"Flash Signature: %s\"%chip_flash_sig) print(\"Programming Complete.\") def FindFirstBlankSector(self) -> int:", "#data += bytes(sector_size_bytes - len(data)) data_crc = zlib.crc32(data, 0) try: ram_crc = self.ReadCRC(ram_address,", "= self.WriteCommand(\"X %d %d\"%(start, end)) RaiseReturnCodeError(response_code, \"Erase Pages\") def CheckSectorsBlank(self, start: int, end:", "code at the specified spot ''' mode = \"\" if thumb_mode: mode =", "= 0 self.RAMRange = [0, 0] self.FlashRange = [0, 0] self.RAMStartWrite = 0", "return int(resp) except ValueError: pass return self.ReturnCodes[\"NoStatusResponse\"] def AssertReturnCode(self, call_name: str) -> None:", "response_code == NXPReturnCodes[\"CMD_SUCCESS\"] def ReadPartID(self): response_code = self.WriteCommand(\"J\") RaiseReturnCodeError(response_code, \"Read Part ID\") resp", "len(data_read): raise ValueError(\"Read Memory received incorrect amount of data\") if isinstance(type(data), data_read): raise", "compliment of checksum def CalculateCheckSum(frame) -> int: csum = 0 for entry in", "sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize == 0 with open(image_file, 'rb') as f: prog", "SyncStringBytes = bytes(SyncString, encoding=\"utf-8\") SyncVerified = bytes(\"OK\"+ISPChip.kNewLine, encoding=\"utf-8\") ReturnCodes = NXPReturnCodes CRCLocation =", "this a valid image by inserting a checksum in the correct place vector_table_size", "if len(data) != num_bytes: print(data, len(data), num_bytes) 
assert len(data) == num_bytes return bytes(data)", "a 0x1a first for some reason. Also the boot version seems to be", "list of little endian 32 bit words intvecs = struct.unpack(\"<%dI\"%vector_table_size, orig_image[:vector_table_size * kuint32_t_size])", "int, orig_image: bytes) -> bytes: vector_table_bytes = GetCheckSumedVectorTable(vector_table_loc, orig_image) image = vector_table_bytes +", "\"Synchronized\"+ISPChip.kNewLine SyncStringBytes = bytes(SyncString, encoding=\"utf-8\") SyncVerified = bytes(\"OK\"+ISPChip.kNewLine, encoding=\"utf-8\") ReturnCodes = NXPReturnCodes CRCLocation", "== 0 assert self.RamRangeLegal(start, len(data)) print(\"Write to RAM %d bytes\"%len(data)) #while i <", "Echo\") def WriteToRam(self, start: int, data: bytes): assert len(data)%self.kWordSize == 0 assert self.RamRangeLegal(start,", "[] while self.data_buffer_in: ch = self.data_buffer_in.popleft() data.append(ch) if len(data) != num_bytes: print(data, len(data),", "stop_bits)) RaiseReturnCodeError(response_code, \"Set Baudrate\") def Echo(self, on: bool = True): ''' ISP echos", "and self.FlashAddressLegal(address + length - 1) and length <= self.FlashRange[1] - self.FlashRange[0] and", "print(self.FlashRange, address, length) return self.FlashAddressLegal(address) and self.FlashAddressLegal(address + length - 1) and length", "and length <= self.FlashRange[1] - self.FlashRange[0] and address%self.kPageSizeBytes == 0 def RamAddressLegal(self, address):", "self.SectorCount self.Unlock() for sector in reversed(range(start_sector, start_sector + sector_count)): print(\"\\nWriting Sector %d\"%sector) data_chunk", "vector_table_bytes = b'' for vecval in intvecs_list: vector_table_bytes += struct.pack(\"<I\", vecval) return vector_table_bytes", "TimeoutError: pass if not synced: #Check for SyncString raise UserWarning(\"Syncronization Failure\") #self.Flush() self.Write(self.SyncStringBytes)#echo", "some reason. 
Also the boot version seems to be Minor then Major not", "Pages\") def CheckSectorsBlank(self, start: int, end: int) -> bool: assert start <= end", "self.Write((bytes(\"%d\"%frequency_khz + self.kNewLine, encoding=\"utf-8\"))) verified = False for i in range(3): try: frame_in", "Baudrate\") def Echo(self, on: bool = True): ''' ISP echos host when enabled", "return sector return self.SectorCount - 1 def ReadSector(self, sector: int) -> bytes: sector_bytes", "0x1e, \"FAIM_NO_CLOCK\" : 0x1f, \"NoStatusResponse\" : 0xff, } def GetErrorCodeName(code: int) -> str:", "print(data, len(data), num_bytes) assert len(data) == num_bytes return bytes(data) def PrepSectorsForWrite(self, start: int,", "sector) sleep(self.kSleepTime) print(\"Write to Flash\") self.CopyRAMToFlash(flash_address, ram_address, sector_size_bytes) sleep(self.kSleepTime) flash_crc = self.ReadCRC(flash_address, num_bytes=len(data))", "already equal to RAM, if so skip try: self.MemoryLocationsEqual(flash_address, ram_address, sector_size_bytes) print(\"Flash already", "0xa, \"BUSY\" : 0xb, \"PARAM_ERROR\" : 0xc, \"ADDR_ERROR\" : 0xd, \"ADDR_NOT_MAPPED\" : 0xe,", "try: frame_in = self.ReadLine()#discard echo except TimeoutError: pass verified = False for i", "bytes) self.WriteSerial(string) #self.WriteSerial(bytes(self.kNewLine, encoding = \"utf-8\")) ''' Takes the command string, return the", "return response_code == NXPReturnCodes[\"CMD_SUCCESS\"] def ReadUID(self): response_code = self.WriteCommand(\"N\") RaiseReturnCodeError(response_code, \"Read UID\") uuids", "FAIM config, stopbit is 1 or 2 ''' response_code = self.WriteCommand(\"B {} {}\".format(baud_rate,", "\"INVALID_BAUD_RATE\" : 0x11, \"INVALID_STOP_BIT\" : 0x12, \"CODE_READ_PROTECTION_ENABLED\" : 0x13, \"Unused 1\" : 0x14,", ": 0x1e, \"FAIM_NO_CLOCK\" : 0x1f, \"NoStatusResponse\" : 0xff, } def GetErrorCodeName(code: int) ->", "print(\"Connect to running ISP\") self.ClearSerialConnection() self.Echo(False) try: self.ReadLine() self.Flush() 
self.ClearBuffer() except TimeoutError: pass", "start: int, data: bytes): assert len(data)%self.kWordSize == 0 assert self.RamRangeLegal(start, len(data)) print(\"Write to", "#self.Flush() self.Write(self.SyncStringBytes)#echo SyncString try: frame_in = self.ReadLine()#discard echo except TimeoutError: pass verified =", "Failed\", w) print(\"Connect to running ISP\") self.ClearSerialConnection() self.Echo(False) try: self.ReadLine() self.Flush() self.ClearBuffer() except", "self.FlashRange[0] + sector*sector_size_bytes print(\"\\nWriting Sector: %d\\nFlash Address: %x\\nRAM Address: %x\\n\"%(sector, flash_address, ram_address)) assert", "sector in range(self.SectorCount): if self.CheckSectorsBlank(sector, self.SectorCount - 1): return sector return self.SectorCount -", "num_bytes)) RaiseReturnCodeError(response_code, \"Read Memory\") while len(self.data_buffer_in) < (num_bytes): self.Read() # Command success is", "self.ClearBuffer() response_code = self.WriteCommand(\"U 23130\") RaiseReturnCodeError(response_code, \"Unlock\") def SetBaudRate(self, baud_rate: int, stop_bits: int", "self.ClearBuffer() self.Unlock() self.PrepSectorsForWrite(0, last_sector) self.EraseSector(0, last_sector) print(\"Checking Sectors are blank\") assert self.CheckSectorsBlank(0, last_sector)", "%x\\nRAM Address: %x\\n\"%(sector, flash_address, ram_address)) assert len(data) == sector_size_bytes #data += bytes(sector_size_bytes -", "sends OK<CR><LF> response_code = self.WriteCommand(\"W %d %d\"%(start, len(data))) RaiseReturnCodeError(response_code, \"Write to RAM\") self.Write(data)#Stream", "like the docs say ''' response_code = self.WriteCommand(\"K\") RaiseReturnCodeError(response_code, \"Read Bootcode Version\") minor", "= True break except TimeoutError: pass if not verified: raise UserWarning(\"Verification Failure\") print(\"Syncronization", "data_crc != ram_crc: print(\"CRC Check failed\", data_crc, ram_crc) else: break # Check to", ": 0x7, \"SECTOR_NOT_BLANK\" : 0x8, 
\"SECTOR_NOT_PREPARED_FOR_WRITE_OPERATION\" : 0x9, \"COMPARE_ERROR\" : 0xa, \"BUSY\" :", ": 0x8, \"SECTOR_NOT_PREPARED_FOR_WRITE_OPERATION\" : 0x9, \"COMPARE_ERROR\" : 0xa, \"BUSY\" : 0xb, \"PARAM_ERROR\" :", "assert(len(data) > 0) filled_data = FillDataToFitSector(data, sector_bytes) self.WriteFlashSector(sector, filled_data) sleep(self.kSleepTime) #assert self.ReadSector(sector) ==", "FillDataToFitSector(data, sector_bytes) self.WriteFlashSector(sector, filled_data) sleep(self.kSleepTime) #assert self.ReadSector(sector) == data_chunk def WriteBinaryToFlash(self, image_file: str,", "bytes([0xde]*sector_bytes)) with open(image_file, 'rb') as f: prog = f.read() #image = RemoveBootableCheckSum(self.kCheckSumLocation, prog)", "%d %d %d\"%(address1, address2, num_bytes) + self.kNewLine), encoding=\"utf-8\")) response = self.ReadLine() response_code =", "SyncString = \"Synchronized\"+ISPChip.kNewLine SyncStringBytes = bytes(SyncString, encoding=\"utf-8\") SyncVerified = bytes(\"OK\"+ISPChip.kNewLine, encoding=\"utf-8\") ReturnCodes =", "* sector_bytes] self.WriteSector(sector, data_chunk) sleep(1) chip_flash_sig = self.ReadFlashSig(self.FlashRange[0], self.FlashRange[1]) print(\"Flash Signature: %s\"%chip_flash_sig) print(\"Programming", "address2, num_bytes) + self.kNewLine), encoding=\"utf-8\")) response = self.ReadLine() response_code = int(response[0]) if response_code", "!= ram_crc: print(\"CRC Check failed\", data_crc, ram_crc) else: break # Check to see", "of the transferr data = [] while self.data_buffer_in: ch = self.data_buffer_in.popleft() data.append(ch) if", "True break except TimeoutError: pass if not synced: #Check for SyncString raise UserWarning(\"Syncronization", "RaiseReturnCodeError(response_code, \"Write to RAM\") self.Write(data)#Stream data after confirmation #self.Write(\"OK\"+self.kNewLine) try: print(self.ReadLine()) except TimeoutError:", "Flash Signature\") sig = [] for i in range(4): sig.append(self.ReadLine().strip()) return sig def", "try: 
self.ReadLine() self.Flush() self.ClearBuffer() except TimeoutError: pass uid = self.ReadUID() print(\"Part UID: %s\"%uid)", "image) image_list = list(image) for byte in range(kuint32_t_size): image_list[vector_table_loc * kuint32_t_size + byte]", "for i in range(5): self.Write(bytes('?'*15, encoding=\"utf-8\")) #self.Write('?' + self.kNewLine) try: frame_in = self.ReadLine()", "self.CheckSectorsBlank(sector, sector) sleep(self.kSleepTime) print(\"Prep Sector\") sector_blank = self.CheckSectorsBlank(sector, sector) assert sector_blank sleep(self.kSleepTime) self.PrepSectorsForWrite(sector,", "%d\"%sector) data_chunk = image[sector * sector_bytes : (sector + 1) * sector_bytes] self.WriteSector(sector,", "bytes(image_list) # 2s compliment of checksum def CalculateCheckSum(frame) -> int: csum = 0", "\"A 1\" else: command = \"A 0\" response_code = self.WriteCommand(command) RaiseReturnCodeError(response_code, \"Set Echo\")", "f: prog = f.read() #image = RemoveBootableCheckSum(self.kCheckSumLocation, prog) image = MakeBootable(self.kCheckSumLocation, prog) print(\"Program", "self.kPageSizeBytes*self.SectorSizePages flash_address = self.FlashRange[0] + sector*sector_size_bytes print(\"\\nWriting Sector: %d\\nFlash Address: %x\\nRAM Address: %x\\n\"%(sector,", "image: bytes): kuint32_t_size = 4 MakeBootable(vector_table_loc, image) image_list = list(image) for byte in", "== 0 return self.ReadMemory(sector*sector_bytes, sector_bytes) def ReadImage(self, image_file: str): blank_sector = self.FindFirstBlankSector() with", "int(code) for item in NXPReturnCodes.items(): if code == item[1]: return item[0] return \"Not", "Crc(width=32, poly=polynomial, reflect_in=True, xor_in=(1<<32)-1, reflect_out=True, xor_out=0x00) crc_calc = crc.bit_by_bit(frame) return crc_calc def GetCheckSumedVectorTable(vector_table_loc:", "UserWarning(\"Syncronization Failure\") #self.Flush() self.Write(self.SyncStringBytes)#echo SyncString try: frame_in = self.ReadLine()#discard echo except 
TimeoutError: pass", "self.Flush() self.Read() self.ClearBuffer() self.Flush() for _ in range(2): try: self.ReadLine() except TimeoutError: pass", "1): return sector return self.SectorCount - 1 def ReadSector(self, sector: int) -> bytes:", "try: frame_in = self.ReadLine() if self.SyncString.strip() in frame_in.strip(): synced = True break except", "len(data) != num_bytes: print(data, len(data), num_bytes) assert len(data) == num_bytes return bytes(data) def", "str, start_sector: int): sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize == 0 with open(image_file, 'rb')", "assert sector_bytes%self.kWordSize == 0 return self.ReadMemory(sector*sector_bytes, sector_bytes) def ReadImage(self, image_file: str): blank_sector =", "ReadCRC(self, address: int, num_bytes: int) -> int: try: response_code = self.WriteCommand(\"S %d %d\"%(address,", "in reversed(range(sector_count)): print(\"\\nWriting Sector %d\"%sector) data_chunk = image[sector * sector_bytes : (sector +", "flash_address: int) -> bool: ''' Read Memory and compare it to what was", "MassErase(self): last_sector = self.SectorCount - 1 sleep(1) self.ClearBuffer() self.Unlock() self.PrepSectorsForWrite(0, last_sector) self.EraseSector(0, last_sector)", "Checks to see if two sections in the memory map are equal '''", "\"Compare\") return response_code == NXPReturnCodes[\"CMD_SUCCESS\"] def ReadUID(self): response_code = self.WriteCommand(\"N\") RaiseReturnCodeError(response_code, \"Read UID\")", "start <= end response_code = self.WriteCommand(\"I %d %d\"%(start, end)) try: self.ReadLine() response =", "**kwargs): super().__init__(*args, **kwargs) self.CrystalFrequency = 12000#khz == 30MHz self.SectorCount = 0 self.RAMSize =", "4 kPageSizeBytes = 64 SectorSizePages = 16 MaxByteTransfer = 1024 StatusRespLength = len(ISPChip.kNewLine)", "not bootable self.Unlock() self.WriteSector(0, bytes([0xde]*sector_bytes)) with open(image_file, 'rb') as f: prog = f.read()", "UID: %s\"%uid) 
boot_code_version = self.ReadBootCodeVersion() print(\"Boot Code Version: %s\"%boot_code_version) self.SetBaudRate(self.baud_rate) print(\"Baudrate set to", "mode)) RaiseReturnCodeError(response_code, \"Go\") def EraseSector(self, start: int, end: int): response_code = self.WriteCommand(\"E %d", "#CRC32 polynomial = 0x104c11db6 crc = Crc(width=32, poly=polynomial, reflect_in=True, xor_in=(1<<32)-1, reflect_out=True, xor_out=0x00) crc_calc", "\"EFRO_NO_POWER\" : 0x17, \"FLASH_NO_POWER\" : 0x18, \"Unused 3\" : 0x19, \"Unused 4\" :", "at the end of the transferr data = [] while self.data_buffer_in: ch =", "bool: ''' Read Memory and compare it to what was written ''' data_read", "of FAIM config, stopbit is 1 or 2 ''' response_code = self.WriteCommand(\"B {}", "try: response_code = self.WriteCommand(\"S %d %d\"%(address, num_bytes)) except TimeoutError: response_code = self.WriteCommand(\"S %d", "range(3): try: frame_in = self.ReadLine()#Should be OK\\r\\n if self.SyncVerified.strip() in frame_in: verified =", "\"SECTOR_NOT_BLANK\" : 0x8, \"SECTOR_NOT_PREPARED_FOR_WRITE_OPERATION\" : 0x9, \"COMPARE_ERROR\" : 0xa, \"BUSY\" : 0xb, \"PARAM_ERROR\"", "chip_flash_sig = self.ReadFlashSig(self.FlashRange[0], self.FlashRange[1]) print(\"Flash Signature: %s\"%chip_flash_sig) print(\"Programming Complete.\") def WriteImage(self, image_file: str):", "''' Checks to see if two sections in the memory map are equal", "list(image) for byte in range(kuint32_t_size): image_list[vector_table_loc * kuint32_t_size + byte] = 0 return", "prog print(\"Program Length:\", len(prog)) sector_count = int(math.ceil(len(prog)/sector_bytes)) assert start_sector + sector_count <= self.SectorCount", "address1: int, address2: int, num_bytes: int): self.Write(bytes((\"M %d %d %d\"%(address1, address2, num_bytes) +", "*args, **kwargs): super().__init__(*args, **kwargs) self.CrystalFrequency = 12000#khz == 30MHz self.SectorCount = 0 self.RAMSize", "sector_bytes%self.kWordSize == 0 with open(image_file, 
'rb') as f: prog = f.read() image =", "self.FlashRange[1]) print(\"Flash Signature: %s\"%chip_flash_sig) print(\"Programming Complete.\") def WriteImage(self, image_file: str): sector_bytes = self.SectorSizePages*self.kPageSizeBytes", "synced: #Check for SyncString raise UserWarning(\"Syncronization Failure\") #self.Flush() self.Write(self.SyncStringBytes)#echo SyncString try: frame_in =", "ISPChip NXPReturnCodes = { \"CMD_SUCCESS\" : 0x0, \"INVALID_COMMAND\" : 0x1, \"SRC_ADDR_ERROR\" : 0x2,", "a valid image by inserting a checksum in the correct place vector_table_size =", "%d %d\"%(start + i, kWordSize)) # self.AssertReturnCode(\"Write to RAM\")#get confirmation # self.Write(data[i:i+kWordSize])#Stream data", "RaiseReturnCodeError(response_code, \"Erase Sectors\") def ErasePages(self, start: int, end: int): response_code = self.WriteCommand(\"X %d", "the command string, return the response code ''' def WriteCommand(self, command_string: str) ->", "num_bytes)) except TimeoutError: response_code = self.WriteCommand(\"S %d %d\"%(address, num_bytes)) RaiseReturnCodeError(response_code, \"Read CRC\") return", "str) -> None: ''' Get a return code with no response ''' code", ">= self.FlashRange[0] and address <= self.FlashRange[1]; def FlashRangeLegal(self, address, length): print(self.FlashRange, address, length)", "%x\\n\"%(sector, flash_address, ram_address)) assert len(data) == sector_size_bytes #data += bytes(sector_size_bytes - len(data)) data_crc", "= int(code) for item in NXPReturnCodes.items(): if code == item[1]: return item[0] return", "image = vector_table_bytes + orig_image[len(vector_table_bytes):] return image def FillDataToFitSector(data: bytes, size: int) ->", "Crc32(frame) -> int: #CRC32 polynomial = 0x104c11db6 crc = Crc(width=32, poly=polynomial, reflect_in=True, xor_in=(1<<32)-1,", "byte array into list of little endian 32 bit words intvecs = struct.unpack(\"<%dI\"%vector_table_size,", ": 0x16, \"EFRO_NO_POWER\" : 0x17, \"FLASH_NO_POWER\" : 0x18, 
\"Unused 3\" : 0x19, \"Unused", "def Unlock(self): ''' Enables Flash Write, Erase, & Go ''' self.ClearBuffer() response_code =", "Memory received incorrect amount of data\") if isinstance(type(data), data_read): raise TypeError(\"data written and", "frame_in: verified = True break except TimeoutError: pass if not verified: raise UserWarning(\"Verification", "= self.SectorSizePages*self.kPageSizeBytes assert(len(data) > 0) filled_data = FillDataToFitSector(data, sector_bytes) self.WriteFlashSector(sector, filled_data) sleep(self.kSleepTime) #assert", "bytes) -> bytes: # make this a valid image by inserting a checksum", "= self.WriteCommand(\"G %d %s\"%(address, mode)) RaiseReturnCodeError(response_code, \"Go\") def EraseSector(self, start: int, end: int):", "bytes: if len(data) != size: data += bytes([0xff] *(size - len(data))) return data", "print(\"\\nWriting Sector %d\"%sector) data_chunk = image[sector * sector_bytes : (sector + 1) *", "# 2s compliment of checksum def CalculateCheckSum(frame) -> int: csum = 0 for", "little endian 32 bit words intvecs = struct.unpack(\"<%dI\"%vector_table_size, orig_image[:vector_table_size * kuint32_t_size]) # calculate", "and address%self.kWordSize == 0 def GetReturnCode(self) -> int: for _ in range(10): #sleep(.1)", "sector_bytes : (sector - start_sector + 1) * sector_bytes] self.WriteSector(sector, data_chunk) sleep(1) chip_flash_sig", "CheckFlashWrite(self, data, flash_address: int) -> bool: ''' Read Memory and compare it to", "#DataBits = 8 #StopBits = 1 SyncString = \"Synchronized\"+ISPChip.kNewLine SyncStringBytes = bytes(SyncString, encoding=\"utf-8\")", "self.SectorCount - 1 sleep(1) self.ClearBuffer() self.Unlock() self.PrepSectorsForWrite(0, last_sector) self.EraseSector(0, last_sector) print(\"Checking Sectors are", "to be Minor then Major not like the docs say ''' response_code =", "Signature: %s\"%chip_flash_sig) print(\"Programming Complete.\") def WriteImage(self, image_file: str): sector_bytes = 
self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize", "def CheckFlashWrite(self, data, flash_address: int) -> bool: ''' Read Memory and compare it", "int, end: int, wait_states: int = 2, mode: int = 0) -> str:", "with open(image_file, 'wb') as f: for sector in range(blank_sector): print(\"Sector \", sector) f.write(self.ReadSector(sector))", "class NXPChip(ISPChip): kWordSize = 4 kPageSizeBytes = 64 SectorSizePages = 16 MaxByteTransfer =", "mode: int = 0) -> str: assert start < end assert(self.FlashAddressLegal(start) and self.FlashAddressLegal(end))", "of data\") if isinstance(type(data), data_read): raise TypeError(\"data written and data read are of", "start_sector + 1) * sector_bytes] self.WriteSector(sector, data_chunk) sleep(1) chip_flash_sig = self.ReadFlashSig(self.FlashRange[0], self.FlashRange[1]) print(\"Flash", "%d %d\"%(start, end)) RaiseReturnCodeError(response_code, \"Prep Sectors\") def CopyRAMToFlash(self, flash_address: int, ram_address: int, num_bytes:", "reflect_in=True, xor_in=(1<<32)-1, reflect_out=True, xor_out=0x00) crc_calc = crc.bit_by_bit(frame) return crc_calc def GetCheckSumedVectorTable(vector_table_loc: int, orig_image:", "and address <= self.RAMRange[1] def RamRangeLegal(self, address, length): return self.RamAddressLegal(address) and self.RamAddressLegal(address +", "RAM, if so skip try: self.MemoryLocationsEqual(flash_address, ram_address, sector_size_bytes) print(\"Flash already equal to RAM,", "0x18, \"Unused 3\" : 0x19, \"Unused 4\" : 0x1a, \"FLASH_NO_CLOCK\" : 0x1b, \"REINVOKE_ISP_CONFIG\"", "NXPReturnCodes = { \"CMD_SUCCESS\" : 0x0, \"INVALID_COMMAND\" : 0x1, \"SRC_ADDR_ERROR\" : 0x2, \"DST_ADDR_ERROR\"", "int) -> bool: assert start <= end response_code = self.WriteCommand(\"I %d %d\"%(start, end))", "for uid in uuids]) def ReadCRC(self, address: int, num_bytes: int) -> int: try:", "bytes([0xff] *(size - len(data))) return data class NXPChip(ISPChip): kWordSize = 4 kPageSizeBytes =", "after confirmation 
# i+=kWordSize #when transfer is complete the handler sends OK<CR><LF> response_code", "self.Echo(False) try: self.ReadLine() self.Flush() self.ClearBuffer() except TimeoutError: pass uid = self.ReadUID() print(\"Part UID:", "sector_bytes : (sector + 1) * sector_bytes] self.WriteSector(sector, data_chunk) chip_flash_sig = self.ReadFlashSig(self.FlashRange[0], self.FlashRange[1])", "def Crc32(frame) -> int: #CRC32 polynomial = 0x104c11db6 crc = Crc(width=32, poly=polynomial, reflect_in=True,", "= { \"CMD_SUCCESS\" : 0x0, \"INVALID_COMMAND\" : 0x1, \"SRC_ADDR_ERROR\" : 0x2, \"DST_ADDR_ERROR\" :", "w) print(\"Connect to running ISP\") self.ClearSerialConnection() self.Echo(False) try: self.ReadLine() self.Flush() self.ClearBuffer() except TimeoutError:", "NXPReturnCodes CRCLocation = 0x000002fc CRCValues = { \"NO_ISP\": 0x4e697370, \"CRP1\" : 0x12345678, \"CRP2\"", "data_crc: sleep(self.kSleepTime) self.WriteToRam(ram_address, data) sleep(self.kSleepTime) ram_crc = self.ReadCRC(ram_address, num_bytes=len(data)) if data_crc != ram_crc:", "= self.WriteCommand(\"C %d %d %d\"%(flash_address, ram_address, num_bytes)) RaiseReturnCodeError(response_code, \"Copy RAM To Flash\") #sleep(.2)", "image_list = list(image) for byte in range(kuint32_t_size): image_list[vector_table_loc * kuint32_t_size + byte] =", "* sector_bytes : (sector + 1) * sector_bytes] self.WriteSector(sector, data_chunk) chip_flash_sig = self.ReadFlashSig(self.FlashRange[0],", "+= bytes(sector_size_bytes - len(data)) data_crc = zlib.crc32(data, 0) try: ram_crc = self.ReadCRC(ram_address, num_bytes=len(data))", "ReadPartID(self): response_code = self.WriteCommand(\"J\") RaiseReturnCodeError(response_code, \"Read Part ID\") resp = self.ReadLine() return int(resp)", "say ''' response_code = self.WriteCommand(\"K\") RaiseReturnCodeError(response_code, \"Read Bootcode Version\") minor = self.ReadLine().strip() major", "self.ReadLine() response_code = int(response[0]) if response_code not in 
(NXPReturnCodes[\"CMD_SUCCESS\"], NXPReturnCodes[\"COMPARE_ERROR\"]): RaiseReturnCodeError(response_code, \"Compare\") return", "TimeoutError: return @timeout(4) def ReadMemory(self, start: int, num_bytes: int): assert num_bytes%self.kWordSize == 0", "return crc_calc def GetCheckSumedVectorTable(vector_table_loc: int, orig_image: bytes) -> bytes: # make this a", "from .ISPChip import ISPChip NXPReturnCodes = { \"CMD_SUCCESS\" : 0x0, \"INVALID_COMMAND\" : 0x1,", "frame: csum += entry return (1<<32) - (csum % (1<<32)) def Crc32(frame) ->", "address >= self.FlashRange[0] and address <= self.FlashRange[1]; def FlashRangeLegal(self, address, length): print(self.FlashRange, address,", "def EraseSector(self, start: int, end: int): response_code = self.WriteCommand(\"E %d %d\"%(start, end)) RaiseReturnCodeError(response_code,", "SetCrystalFrequency(self, frequency_khz: int): self.Write((bytes(\"%d\"%frequency_khz + self.kNewLine, encoding=\"utf-8\"))) verified = False for i in", "RaiseReturnCodeError(response_code, \"Go\") def EraseSector(self, start: int, end: int): response_code = self.WriteCommand(\"E %d %d\"%(start,", "to RAM\")#get confirmation # self.Write(data[i:i+kWordSize])#Stream data after confirmation # i+=kWordSize #when transfer is", "GetErrorCodeName(code), code)) def RemoveBootableCheckSum(vector_table_loc: int, image: bytes): kuint32_t_size = 4 MakeBootable(vector_table_loc, image) image_list", "as e: print(e, type(e)) raise def SyncConnection(self): synced = False self.ClearSerialConnection() self.Flush() for", "0x17, \"FLASH_NO_POWER\" : 0x18, \"Unused 3\" : 0x19, \"Unused 4\" : 0x1a, \"FLASH_NO_CLOCK\"", "clear csum value csum = CalculateCheckSum(intvecs_list) intvecs_list[vector_table_loc] = csum vector_table_bytes = b'' for", "2 ''' response_code = self.WriteCommand(\"B {} {}\".format(baud_rate, stop_bits)) RaiseReturnCodeError(response_code, \"Set Baudrate\") def Echo(self,", "response_code = self.WriteCommand(\"R %d %d\"%(start, num_bytes)) 
RaiseReturnCodeError(response_code, \"Read Memory\") while len(self.data_buffer_in) < (num_bytes):", "\"Prep Sectors\") def CopyRAMToFlash(self, flash_address: int, ram_address: int, num_bytes: int): assert self.RamRangeLegal(ram_address, num_bytes)", "sector_bytes) self.WriteFlashSector(sector, filled_data) sleep(self.kSleepTime) #assert self.ReadSector(sector) == data_chunk def WriteBinaryToFlash(self, image_file: str, start_sector:", "+ self.kNewLine, encoding=\"utf-8\")) return self.GetReturnCode() def Unlock(self): ''' Enables Flash Write, Erase, &", "mode = 'T' response_code = self.WriteCommand(\"G %d %s\"%(address, mode)) RaiseReturnCodeError(response_code, \"Go\") def EraseSector(self,", "in range(3): try: frame_in = self.ReadLine()#Should be OK\\r\\n if self.SyncVerified.strip() in frame_in: verified", "0x1b, \"REINVOKE_ISP_CONFIG\" : 0x1c, \"NO_VALID_IMAGE\" : 0x1d, \"FAIM_NO_POWER\" : 0x1e, \"FAIM_NO_CLOCK\" : 0x1f,", "0) -> str: assert start < end assert(self.FlashAddressLegal(start) and self.FlashAddressLegal(end)) response_code = self.WriteCommand(\"Z", "encoding=\"utf-8\") SyncVerified = bytes(\"OK\"+ISPChip.kNewLine, encoding=\"utf-8\") ReturnCodes = NXPReturnCodes CRCLocation = 0x000002fc CRCValues =", "write\") return except: pass print(\"Prep Sector\") self.PrepSectorsForWrite(sector, sector) sleep(self.kSleepTime) print(\"Erase Sector\") self.EraseSector(sector, sector)", "def WriteImage(self, image_file: str): sector_bytes = self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize == 0 #make not", ": 0xf, \"INVALID_CODE\" : 0x10, \"INVALID_BAUD_RATE\" : 0x11, \"INVALID_STOP_BIT\" : 0x12, \"CODE_READ_PROTECTION_ENABLED\" :", ".ISPChip import ISPChip NXPReturnCodes = { \"CMD_SUCCESS\" : 0x0, \"INVALID_COMMAND\" : 0x1, \"SRC_ADDR_ERROR\"", "else: break # Check to see if sector is already equal to RAM,", "echo except TimeoutError: pass verified = False for i in range(3): try: frame_in", "frame_in.strip(): synced = True break 
except TimeoutError: pass if not synced: #Check for", "\"Copy RAM To Flash\") #sleep(.2) def Go(self, address: int, thumb_mode: bool = False):", "self.ReadCRC(ram_address, num_bytes=len(data)) except Exception: ram_crc = self.ReadCRC(ram_address, num_bytes=len(data)) while ram_crc != data_crc: sleep(self.kSleepTime)", "equal to RAM, skipping write\") return except: pass print(\"Prep Sector\") self.PrepSectorsForWrite(sector, sector) sleep(self.kSleepTime)", "NXPChip(ISPChip): kWordSize = 4 kPageSizeBytes = 64 SectorSizePages = 16 MaxByteTransfer = 1024", "response ''' code = self.GetReturnCode() RaiseReturnCodeError(code, call_name) def Write(self, string : bytes) ->", "end assert(self.FlashAddressLegal(start) and self.FlashAddressLegal(end)) response_code = self.WriteCommand(\"Z %d %d %d %d\"%(start, end, wait_states,", "length <= self.RAMRange[1] - self.RAMRange[0] and address%self.kWordSize == 0 def GetReturnCode(self) -> int:", "self.GetReturnCode() def Unlock(self): ''' Enables Flash Write, Erase, & Go ''' self.ClearBuffer() response_code", "self.RAMRange[1] - self.RAMRange[0] and address%self.kWordSize == 0 def GetReturnCode(self) -> int: for _", "%d\"%(start, len(data))) RaiseReturnCodeError(response_code, \"Write to RAM\") self.Write(data)#Stream data after confirmation #self.Write(\"OK\"+self.kNewLine) try: print(self.ReadLine())", "vector_table_bytes + orig_image[len(vector_table_bytes):] return image def FillDataToFitSector(data: bytes, size: int) -> bytes: if", "int, end: int) -> bool: assert start <= end response_code = self.WriteCommand(\"I %d", "def Go(self, address: int, thumb_mode: bool = False): ''' Start executing code at", "pass verified = False for i in range(3): try: frame_in = self.ReadLine()#Should be", "ReturnCodes = NXPReturnCodes CRCLocation = 0x000002fc CRCValues = { \"NO_ISP\": 0x4e697370, \"CRP1\" :", "None: if int(code) != NXPReturnCodes[\"CMD_SUCCESS\"]: raise UserWarning( \"Return Code Failure in {} {}", "= 0 self.RAMSize = 0 
self.RAMRange = [0, 0] self.FlashRange = [0, 0]", "start: int, end: int) -> bool: assert start <= end response_code = self.WriteCommand(\"I", "data: bytes): assert len(data)%self.kWordSize == 0 assert self.RamRangeLegal(start, len(data)) print(\"Write to RAM %d", "Bootcode Version\") minor = self.ReadLine().strip() major = self.ReadLine().strip() return \"%d.%d\"%(int(major), int(minor)) ''' Checks", "{ \"NO_ISP\": 0x4e697370, \"CRP1\" : 0x12345678, \"CRP2\" : 0x87654321, \"CRP3\" : 0x43218765, }", "-> bytes: vector_table_bytes = GetCheckSumedVectorTable(vector_table_loc, orig_image) image = vector_table_bytes + orig_image[len(vector_table_bytes):] return image", "thumb_mode: mode = 'T' response_code = self.WriteCommand(\"G %d %s\"%(address, mode)) RaiseReturnCodeError(response_code, \"Go\") def", "Flash\") #sleep(.2) def Go(self, address: int, thumb_mode: bool = False): ''' Start executing", "value csum = CalculateCheckSum(intvecs_list) intvecs_list[vector_table_loc] = csum vector_table_bytes = b'' for vecval in", "end response_code = self.WriteCommand(\"I %d %d\"%(start, end)) try: self.ReadLine() response = self.ReadLine().strip() print(\"Check", "uid in uuids]) def ReadCRC(self, address: int, num_bytes: int) -> int: try: response_code", "= CalculateCheckSum(intvecs_list) intvecs_list[vector_table_loc] = csum vector_table_bytes = b'' for vecval in intvecs_list: vector_table_bytes", "len(data)%self.kWordSize == 0 assert self.RamRangeLegal(start, len(data)) print(\"Write to RAM %d bytes\"%len(data)) #while i", "verified: raise UserWarning(\"Verification Failure\") def CheckFlashWrite(self, data, flash_address: int) -> bool: ''' Read", "ReadImage(self, image_file: str): blank_sector = self.FindFirstBlankSector() with open(image_file, 'wb') as f: for sector", "int, ram_address: int, num_bytes: int): assert self.RamRangeLegal(ram_address, num_bytes) assert self.FlashRangeLegal(flash_address, num_bytes) response_code =", "except Exception as e: print(e, type(e)) raise 
def SyncConnection(self): synced = False self.ClearSerialConnection()", "size: int) -> bytes: if len(data) != size: data += bytes([0xff] *(size -", "True break except TimeoutError: pass if not verified: raise UserWarning(\"Verification Failure\") print(\"Syncronization Successful\")", "print(\"\\nWriting Sector: %d\\nFlash Address: %x\\nRAM Address: %x\\n\"%(sector, flash_address, ram_address)) assert len(data) == sector_size_bytes", "def ErasePages(self, start: int, end: int): response_code = self.WriteCommand(\"X %d %d\"%(start, end)) RaiseReturnCodeError(response_code,", "\"ADDR_ERROR\" : 0xd, \"ADDR_NOT_MAPPED\" : 0xe, \"CMD_LOCKED\" : 0xf, \"INVALID_CODE\" : 0x10, \"INVALID_BAUD_RATE\"", "sector_bytes] self.WriteSector(sector, data_chunk) sleep(1) chip_flash_sig = self.ReadFlashSig(self.FlashRange[0], self.FlashRange[1]) print(\"Flash Signature: %s\"%chip_flash_sig) print(\"Programming Complete.\")", "self.SectorSizePages*self.kPageSizeBytes assert sector_bytes%self.kWordSize == 0 #make not bootable self.Unlock() self.WriteSector(0, bytes([0xde]*sector_bytes)) with open(image_file,", "= 1024 StatusRespLength = len(ISPChip.kNewLine) + 1 #Parity = None #DataBits = 8", "= 1 def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.CrystalFrequency = 12000#khz == 30MHz", "num_bytes: int): self.Write(bytes((\"M %d %d %d\"%(address1, address2, num_bytes) + self.kNewLine), encoding=\"utf-8\")) response =", "as f: prog = f.read() image = prog print(\"Program Length:\", len(prog)) sector_count =", "sector return self.SectorCount - 1 def ReadSector(self, sector: int) -> bytes: sector_bytes =", "Takes the command string, return the response code ''' def WriteCommand(self, command_string: str)", "ram_address, sector_size_bytes) sleep(self.kSleepTime) flash_crc = self.ReadCRC(flash_address, num_bytes=len(data)) assert flash_crc == data_crc assert self.MemoryLocationsEqual(flash_address,", "len(self.data_buffer_in) < (num_bytes): self.Read() # Command 
success is sent at the end of", "command = \"A 0\" response_code = self.WriteCommand(command) RaiseReturnCodeError(response_code, \"Set Echo\") def WriteToRam(self, start:", "kuint32_t_size + byte] = 0 return bytes(image_list) # 2s compliment of checksum def", "for some reason. Also the boot version seems to be Minor then Major", "sector is already equal to RAM, if so skip try: self.MemoryLocationsEqual(flash_address, ram_address, sector_size_bytes)", "self.RAMStartWrite sector_size_bytes = self.kPageSizeBytes*self.SectorSizePages flash_address = self.FlashRange[0] + sector*sector_size_bytes print(\"\\nWriting Sector: %d\\nFlash Address:", "= self.RAMStartWrite sector_size_bytes = self.kPageSizeBytes*self.SectorSizePages flash_address = self.FlashRange[0] + sector*sector_size_bytes print(\"\\nWriting Sector: %d\\nFlash", "kPageSizeBytes = 64 SectorSizePages = 16 MaxByteTransfer = 1024 StatusRespLength = len(ISPChip.kNewLine) +", "''' Read Memory and compare it to what was written ''' data_read =", "end)) RaiseReturnCodeError(response_code, \"Erase Sectors\") def ErasePages(self, start: int, end: int): response_code = self.WriteCommand(\"X", "= 4 kPageSizeBytes = 64 SectorSizePages = 16 MaxByteTransfer = 1024 StatusRespLength =", "self.ReadLine().strip(), self.ReadLine().strip()] return \" \".join([\"0x%08x\"%int(uid) for uid in uuids]) def ReadCRC(self, address: int,", "ResetSerialConnection(self): self.Flush() self.Write(bytes(self.kNewLine, encoding=\"utf-8\")) try: self.ReadLine() except TimeoutError: pass def InitConnection(self): self.ResetSerialConnection() try:", "+ length) and length <= self.RAMRange[1] - self.RAMRange[0] and address%self.kWordSize == 0 def", "self.SetBaudRate(self.baud_rate) print(\"Baudrate set to %d\"%self.baud_rate) except Exception as e: print(e, type(e)) raise def", "#sleep(.2) def Go(self, address: int, thumb_mode: bool = False): ''' Start executing code", "%s\"%uid) boot_code_version = self.ReadBootCodeVersion() print(\"Boot Code 
Version: %s\"%boot_code_version) self.SetBaudRate(self.baud_rate) print(\"Baudrate set to %d\"%self.baud_rate)", "the checksum over the interrupt vectors intvecs_list = list(intvecs[:vector_table_size]) intvecs_list[vector_table_loc] = 0 #", "try: resp = self.ReadLine().strip() return int(resp) except ValueError: pass return self.ReturnCodes[\"NoStatusResponse\"] def AssertReturnCode(self,", "%d bytes\"%len(data)) #while i < len(data): # self.Write(\"W %d %d\"%(start + i, kWordSize))", "ram_crc != data_crc: sleep(self.kSleepTime) self.WriteToRam(ram_address, data) sleep(self.kSleepTime) ram_crc = self.ReadCRC(ram_address, num_bytes=len(data)) if data_crc", "= self.ReadUID() print(\"Part UID: %s\"%uid) boot_code_version = self.ReadBootCodeVersion() print(\"Boot Code Version: %s\"%boot_code_version) self.SetBaudRate(self.baud_rate)", "if not verified: raise UserWarning(\"Verification Failure\") print(\"Syncronization Successful\") def ClearSerialConnection(self): self.Write(bytes(self.kNewLine, encoding=\"utf-8\")) self.ClearBuffer()", "mode = \"\" if thumb_mode: mode = 'T' response_code = self.WriteCommand(\"G %d %s\"%(address,", "self.FlashRange = [0, 0] self.RAMStartWrite = 0 self.kCheckSumLocation = 7 #0x0000001c def FlashAddressLegal(self,", "Write FAIM\") def ResetSerialConnection(self): self.Flush() self.Write(bytes(self.kNewLine, encoding=\"utf-8\")) try: self.ReadLine() except TimeoutError: pass def", "= self.ReadBootCodeVersion() print(\"Boot Code Version: %s\"%boot_code_version) self.SetBaudRate(self.baud_rate) print(\"Baudrate set to %d\"%self.baud_rate) except Exception", "Sector: %d\\nFlash Address: %x\\nRAM Address: %x\\n\"%(sector, flash_address, ram_address)) assert len(data) == sector_size_bytes #data", "= { \"NO_ISP\": 0x4e697370, \"CRP1\" : 0x12345678, \"CRP2\" : 0x87654321, \"CRP3\" : 0x43218765,", "0x13, \"Unused 1\" : 0x14, \"USER_CODE_CHECKSUM\" : 0x15, \"Unused 2\" : 0x16, \"EFRO_NO_POWER\"", "end: int) -> bool: assert start <= end 
response_code = self.WriteCommand(\"I %d %d\"%(start,", "verified: raise UserWarning(\"Verification Failure\") print(\"Syncronization Successful\") def ClearSerialConnection(self): self.Write(bytes(self.kNewLine, encoding=\"utf-8\")) self.ClearBuffer() self.Flush() self.Read()", ": (sector + 1) * sector_bytes] self.WriteSector(sector, data_chunk) chip_flash_sig = self.ReadFlashSig(self.FlashRange[0], self.FlashRange[1]) print(\"Flash", ": 0x1, \"SRC_ADDR_ERROR\" : 0x2, \"DST_ADDR_ERROR\" : 0x3, \"SRC_ADDR_NOT_MAPPED\" : 0x4, \"DST_ADDR_NOT_MAPPED\" :", "**kwargs) self.CrystalFrequency = 12000#khz == 30MHz self.SectorCount = 0 self.RAMSize = 0 self.RAMRange", "raise UserWarning(\"Verification Failure\") def CheckFlashWrite(self, data, flash_address: int) -> bool: ''' Read Memory", "frame_in = self.ReadLine()#Should be OK\\r\\n if self.SyncVerified.strip() in frame_in: verified = True break", "True): ''' ISP echos host when enabled ''' if on: command = \"A", "8 kuint32_t_size = 4 # Make byte array into list of little endian", "image = MakeBootable(self.kCheckSumLocation, prog) print(\"Program Length:\", len(prog)) sector_count = int(math.ceil(len(prog)/sector_bytes)) assert sector_count <=", "address, length): return self.RamAddressLegal(address) and self.RamAddressLegal(address + length) and length <= self.RAMRange[1] -", "False for i in range(3): try: frame_in = self.ReadLine()#Should be OK\\r\\n if self.SyncVerified.strip()", "self.kNewLine), encoding=\"utf-8\")) response = self.ReadLine() response_code = int(response[0]) if response_code not in (NXPReturnCodes[\"CMD_SUCCESS\"],", "self.ClearBuffer() except TimeoutError: pass uid = self.ReadUID() print(\"Part UID: %s\"%uid) boot_code_version = self.ReadBootCodeVersion()", "end: int): response_code = self.WriteCommand(\"E %d %d\"%(start, end)) RaiseReturnCodeError(response_code, \"Erase Sectors\") def ErasePages(self,", "confirmation # self.Write(data[i:i+kWordSize])#Stream data after confirmation # i+=kWordSize 
#when transfer is complete the", "is sent at the end of the transferr data = [] while self.data_buffer_in:", "NXPReturnCodes[\"COMPARE_ERROR\"]): RaiseReturnCodeError(response_code, \"Compare\") return response_code == NXPReturnCodes[\"CMD_SUCCESS\"] def ReadUID(self): response_code = self.WriteCommand(\"N\") RaiseReturnCodeError(response_code,", "%d %d\"%(start, end)) RaiseReturnCodeError(response_code, \"Erase Sectors\") def ErasePages(self, start: int, end: int): response_code", "from time import sleep import struct from timeout_decorator import timeout from timeout_decorator.timeout_decorator import", "sleep(self.kSleepTime) print(\"Write to Flash\") self.CopyRAMToFlash(flash_address, ram_address, sector_size_bytes) sleep(self.kSleepTime) flash_crc = self.ReadCRC(flash_address, num_bytes=len(data)) assert", "num_bytes: int): assert num_bytes%self.kWordSize == 0 assert self.RamRangeLegal(start, num_bytes) print(\"ReadMemory\") #self.Flush() #self.Read() #self.ClearBuffer()", "To Flash\") #sleep(.2) def Go(self, address: int, thumb_mode: bool = False): ''' Start", "= self.ReadMemory(flash_address, len(data)) if len(data) != len(data_read): raise ValueError(\"Read Memory received incorrect amount", "zlib.crc32(data, 0) try: ram_crc = self.ReadCRC(ram_address, num_bytes=len(data)) except Exception: ram_crc = self.ReadCRC(ram_address, num_bytes=len(data))", "skipping write\") return except: pass print(\"Prep Sector\") self.PrepSectorsForWrite(sector, sector) sleep(self.kSleepTime) print(\"Erase Sector\") self.EraseSector(sector,", "num_bytes: int) -> int: try: response_code = self.WriteCommand(\"S %d %d\"%(address, num_bytes)) except TimeoutError:", "4 MakeBootable(vector_table_loc, image) image_list = list(image) for byte in range(kuint32_t_size): image_list[vector_table_loc * kuint32_t_size" ]
[ "parser_update = sub_parsers.add_parser('update') parser_update.add_argument('id', type=int) parser_update.add_argument('attr', type=str, choices=['name', 'phone', 'local', 'domain', 'passwd']) parser_update.add_argument('data',", "= time.time() parser = argparse.ArgumentParser() parsing_seller(parser) args = parser.parse_args() if args.function == 'info':", "Number: {phone}\".format(phone = row[2])) print(\"email: {local}@{domain}\".format(local=row[3], domain = row[4])) def show_seller_info(args): # TODO", "sub_parsers = parser.add_subparsers(dest='function') # info parser_info = sub_parsers.add_parser('info') parser_info.add_argument('id', type=int) # update parser_update", "= parser.parse_args() if args.function == 'info': show_seller_info(args) elif args.function == 'update': modify_seller_info(args) else:", "'passwd']) parser_update.add_argument('data', type=str) def int_check(text): try: int(text); return True except ValueError: return False", "int_check(text): try: int(text); return True except ValueError: return False def show_seller_from_table(row): print(\"Name: {name}\".format(name", "# TODO try: cur = conn.cursor() sql = \"SELECT * FROM seller WHERE", "= argparse.ArgumentParser() parsing_seller(parser) args = parser.parse_args() if args.function == 'info': show_seller_info(args) elif args.function", "def show_seller_from_table(row): print(\"Name: {name}\".format(name = row[1])) print(\"Phone Number: {phone}\".format(phone = row[2])) print(\"email: {local}@{domain}\".format(local=row[3],", "print(\"Name: {name}\".format(name = row[1])) print(\"Phone Number: {phone}\".format(phone = row[2])) print(\"email: {local}@{domain}\".format(local=row[3], domain =", "conn.cursor() sql = \"SELECT * FROM seller WHERE id={id};\".format(id=args.id) cur.execute(sql) rows = cur.fetchall()", "int(text); return True except ValueError: return False def show_seller_from_table(row): print(\"Name: {name}\".format(name = row[1]))", "\" \\ \"SET {attr} = \\'{data}\\' \" \\ 
\"WHERE seller.id={id}\".format(attr=args.attr, data=args.data, id=int(args.id)) print(sql)", "parser.parse_args() if args.function == 'info': show_seller_info(args) elif args.function == 'update': modify_seller_info(args) else: parser.print_help()", "err: print(err) conn.rollback() print(\"modify_seller_info\") if __name__ == \"__main__\": start = time.time() parser =", "= sub_parsers.add_parser('update') parser_update.add_argument('id', type=int) parser_update.add_argument('attr', type=str, choices=['name', 'phone', 'local', 'domain', 'passwd']) parser_update.add_argument('data', type=str)", "{phone}\".format(phone = row[2])) print(\"email: {local}@{domain}\".format(local=row[3], domain = row[4])) def show_seller_info(args): # TODO try:", "TODO try: cur = conn.cursor() sql = \"UPDATE seller \" \\ \"SET {attr}", "== \"__main__\": start = time.time() parser = argparse.ArgumentParser() parsing_seller(parser) args = parser.parse_args() if", "\" \\ \"WHERE seller.id={id}\".format(attr=args.attr, data=args.data, id=int(args.id)) print(sql) cur.execute(sql) conn.commit() except Exception as err:", "\\'{data}\\' \" \\ \"WHERE seller.id={id}\".format(attr=args.attr, data=args.data, id=int(args.id)) print(sql) cur.execute(sql) conn.commit() except Exception as", "parser_info = sub_parsers.add_parser('info') parser_info.add_argument('id', type=int) # update parser_update = sub_parsers.add_parser('update') parser_update.add_argument('id', type=int) parser_update.add_argument('attr',", "id=int(args.id)) print(sql) cur.execute(sql) conn.commit() except Exception as err: print(err) conn.rollback() print(\"modify_seller_info\") if __name__", "parser_update.add_argument('data', type=str) def int_check(text): try: int(text); return True except ValueError: return False def", "= sub_parsers.add_parser('info') parser_info.add_argument('id', type=int) # update parser_update = sub_parsers.add_parser('update') parser_update.add_argument('id', type=int) parser_update.add_argument('attr', 
type=str,", "'phone', 'local', 'domain', 'passwd']) parser_update.add_argument('data', type=str) def int_check(text): try: int(text); return True except", "{attr} = \\'{data}\\' \" \\ \"WHERE seller.id={id}\".format(attr=args.attr, data=args.data, id=int(args.id)) print(sql) cur.execute(sql) conn.commit() except", "type=int) # update parser_update = sub_parsers.add_parser('update') parser_update.add_argument('id', type=int) parser_update.add_argument('attr', type=str, choices=['name', 'phone', 'local',", "cur.execute(sql) conn.commit() except Exception as err: print(err) conn.rollback() print(\"modify_seller_info\") if __name__ == \"__main__\":", "id={id};\".format(id=args.id) cur.execute(sql) rows = cur.fetchall() if not rows: print(\"Given seller ID doesn't exist.\")", "type=str, choices=['name', 'phone', 'local', 'domain', 'passwd']) parser_update.add_argument('data', type=str) def int_check(text): try: int(text); return", "Exception as err: print(err) def modify_seller_info(args): # TODO try: cur = conn.cursor() sql", "sql = \"SELECT * FROM seller WHERE id={id};\".format(id=args.id) cur.execute(sql) rows = cur.fetchall() if", "print(err) def modify_seller_info(args): # TODO try: cur = conn.cursor() sql = \"UPDATE seller", "= conn.cursor() sql = \"UPDATE seller \" \\ \"SET {attr} = \\'{data}\\' \"", "try: cur = conn.cursor() sql = \"SELECT * FROM seller WHERE id={id};\".format(id=args.id) cur.execute(sql)", "# update parser_update = sub_parsers.add_parser('update') parser_update.add_argument('id', type=int) parser_update.add_argument('attr', type=str, choices=['name', 'phone', 'local', 'domain',", "args.function == 'update': modify_seller_info(args) else: parser.print_help() print(\"Running Time: \", end=\"\") print(time.time() - start)", "print(err) conn.rollback() print(\"modify_seller_info\") if __name__ == \"__main__\": start = time.time() parser = argparse.ArgumentParser()", "conn.rollback() print(\"modify_seller_info\") if __name__ == \"__main__\": start = 
time.time() parser = argparse.ArgumentParser() parsing_seller(parser)", "True except ValueError: return False def show_seller_from_table(row): print(\"Name: {name}\".format(name = row[1])) print(\"Phone Number:", "parser.add_subparsers(dest='function') # info parser_info = sub_parsers.add_parser('info') parser_info.add_argument('id', type=int) # update parser_update = sub_parsers.add_parser('update')", "print(\"Given seller ID doesn't exist.\") return for row in rows: show_seller_from_table(row) except Exception", "= \"SELECT * FROM seller WHERE id={id};\".format(id=args.id) cur.execute(sql) rows = cur.fetchall() if not", "return True except ValueError: return False def show_seller_from_table(row): print(\"Name: {name}\".format(name = row[1])) print(\"Phone", "WHERE id={id};\".format(id=args.id) cur.execute(sql) rows = cur.fetchall() if not rows: print(\"Given seller ID doesn't", "\"UPDATE seller \" \\ \"SET {attr} = \\'{data}\\' \" \\ \"WHERE seller.id={id}\".format(attr=args.attr, data=args.data,", "{name}\".format(name = row[1])) print(\"Phone Number: {phone}\".format(phone = row[2])) print(\"email: {local}@{domain}\".format(local=row[3], domain = row[4]))", "type=int) parser_update.add_argument('attr', type=str, choices=['name', 'phone', 'local', 'domain', 'passwd']) parser_update.add_argument('data', type=str) def int_check(text): try:", "print(\"email: {local}@{domain}\".format(local=row[3], domain = row[4])) def show_seller_info(args): # TODO try: cur = conn.cursor()", "choices=['name', 'phone', 'local', 'domain', 'passwd']) parser_update.add_argument('data', type=str) def int_check(text): try: int(text); return True", "\"__main__\": start = time.time() parser = argparse.ArgumentParser() parsing_seller(parser) args = parser.parse_args() if args.function", "conn def parsing_seller(parser:argparse.ArgumentParser): sub_parsers = parser.add_subparsers(dest='function') # info parser_info = sub_parsers.add_parser('info') parser_info.add_argument('id', type=int)", 
"return for row in rows: show_seller_from_table(row) except Exception as err: print(err) def modify_seller_info(args):", "row in rows: show_seller_from_table(row) except Exception as err: print(err) def modify_seller_info(args): # TODO", "parser_info.add_argument('id', type=int) # update parser_update = sub_parsers.add_parser('update') parser_update.add_argument('id', type=int) parser_update.add_argument('attr', type=str, choices=['name', 'phone',", "import conn def parsing_seller(parser:argparse.ArgumentParser): sub_parsers = parser.add_subparsers(dest='function') # info parser_info = sub_parsers.add_parser('info') parser_info.add_argument('id',", "except ValueError: return False def show_seller_from_table(row): print(\"Name: {name}\".format(name = row[1])) print(\"Phone Number: {phone}\".format(phone", "* FROM seller WHERE id={id};\".format(id=args.id) cur.execute(sql) rows = cur.fetchall() if not rows: print(\"Given", "__name__ == \"__main__\": start = time.time() parser = argparse.ArgumentParser() parsing_seller(parser) args = parser.parse_args()", "cur = conn.cursor() sql = \"SELECT * FROM seller WHERE id={id};\".format(id=args.id) cur.execute(sql) rows", "import argparse from helpers.connection import conn def parsing_seller(parser:argparse.ArgumentParser): sub_parsers = parser.add_subparsers(dest='function') # info", "ID doesn't exist.\") return for row in rows: show_seller_from_table(row) except Exception as err:", "TODO try: cur = conn.cursor() sql = \"SELECT * FROM seller WHERE id={id};\".format(id=args.id)", "= parser.add_subparsers(dest='function') # info parser_info = sub_parsers.add_parser('info') parser_info.add_argument('id', type=int) # update parser_update =", "conn.commit() except Exception as err: print(err) conn.rollback() print(\"modify_seller_info\") if __name__ == \"__main__\": start", "parsing_seller(parser:argparse.ArgumentParser): sub_parsers = parser.add_subparsers(dest='function') # info parser_info = sub_parsers.add_parser('info') 
parser_info.add_argument('id', type=int) # update", "type=str) def int_check(text): try: int(text); return True except ValueError: return False def show_seller_from_table(row):", "row[2])) print(\"email: {local}@{domain}\".format(local=row[3], domain = row[4])) def show_seller_info(args): # TODO try: cur =", "argparse from helpers.connection import conn def parsing_seller(parser:argparse.ArgumentParser): sub_parsers = parser.add_subparsers(dest='function') # info parser_info", "start = time.time() parser = argparse.ArgumentParser() parsing_seller(parser) args = parser.parse_args() if args.function ==", "sub_parsers.add_parser('info') parser_info.add_argument('id', type=int) # update parser_update = sub_parsers.add_parser('update') parser_update.add_argument('id', type=int) parser_update.add_argument('attr', type=str, choices=['name',", "modify_seller_info(args): # TODO try: cur = conn.cursor() sql = \"UPDATE seller \" \\", "= row[4])) def show_seller_info(args): # TODO try: cur = conn.cursor() sql = \"SELECT", "if __name__ == \"__main__\": start = time.time() parser = argparse.ArgumentParser() parsing_seller(parser) args =", "time.time() parser = argparse.ArgumentParser() parsing_seller(parser) args = parser.parse_args() if args.function == 'info': show_seller_info(args)", "cur.fetchall() if not rows: print(\"Given seller ID doesn't exist.\") return for row in", "seller.id={id}\".format(attr=args.attr, data=args.data, id=int(args.id)) print(sql) cur.execute(sql) conn.commit() except Exception as err: print(err) conn.rollback() print(\"modify_seller_info\")", "print(\"modify_seller_info\") if __name__ == \"__main__\": start = time.time() parser = argparse.ArgumentParser() parsing_seller(parser) args", "in rows: show_seller_from_table(row) except Exception as err: print(err) def modify_seller_info(args): # TODO try:", "FROM seller WHERE id={id};\".format(id=args.id) cur.execute(sql) rows = cur.fetchall() if not rows: print(\"Given seller", "exist.\") return for row 
in rows: show_seller_from_table(row) except Exception as err: print(err) def", "from helpers.connection import conn def parsing_seller(parser:argparse.ArgumentParser): sub_parsers = parser.add_subparsers(dest='function') # info parser_info =", "# TODO try: cur = conn.cursor() sql = \"UPDATE seller \" \\ \"SET", "\\ \"WHERE seller.id={id}\".format(attr=args.attr, data=args.data, id=int(args.id)) print(sql) cur.execute(sql) conn.commit() except Exception as err: print(err)", "show_seller_info(args): # TODO try: cur = conn.cursor() sql = \"SELECT * FROM seller", "try: int(text); return True except ValueError: return False def show_seller_from_table(row): print(\"Name: {name}\".format(name =", "rows: show_seller_from_table(row) except Exception as err: print(err) def modify_seller_info(args): # TODO try: cur", "cur = conn.cursor() sql = \"UPDATE seller \" \\ \"SET {attr} = \\'{data}\\'", "'local', 'domain', 'passwd']) parser_update.add_argument('data', type=str) def int_check(text): try: int(text); return True except ValueError:", "row[4])) def show_seller_info(args): # TODO try: cur = conn.cursor() sql = \"SELECT *", "def modify_seller_info(args): # TODO try: cur = conn.cursor() sql = \"UPDATE seller \"", "sql = \"UPDATE seller \" \\ \"SET {attr} = \\'{data}\\' \" \\ \"WHERE", "as err: print(err) conn.rollback() print(\"modify_seller_info\") if __name__ == \"__main__\": start = time.time() parser", "import time import argparse from helpers.connection import conn def parsing_seller(parser:argparse.ArgumentParser): sub_parsers = parser.add_subparsers(dest='function')", "parser_update.add_argument('id', type=int) parser_update.add_argument('attr', type=str, choices=['name', 'phone', 'local', 'domain', 'passwd']) parser_update.add_argument('data', type=str) def int_check(text):", "def show_seller_info(args): # TODO try: cur = conn.cursor() sql = \"SELECT * FROM", "args.function == 'info': show_seller_info(args) elif args.function == 'update': modify_seller_info(args) 
else: parser.print_help() print(\"Running Time:", "\"SET {attr} = \\'{data}\\' \" \\ \"WHERE seller.id={id}\".format(attr=args.attr, data=args.data, id=int(args.id)) print(sql) cur.execute(sql) conn.commit()", "= cur.fetchall() if not rows: print(\"Given seller ID doesn't exist.\") return for row", "== 'info': show_seller_info(args) elif args.function == 'update': modify_seller_info(args) else: parser.print_help() print(\"Running Time: \",", "\"WHERE seller.id={id}\".format(attr=args.attr, data=args.data, id=int(args.id)) print(sql) cur.execute(sql) conn.commit() except Exception as err: print(err) conn.rollback()", "except Exception as err: print(err) def modify_seller_info(args): # TODO try: cur = conn.cursor()", "show_seller_info(args) elif args.function == 'update': modify_seller_info(args) else: parser.print_help() print(\"Running Time: \", end=\"\") print(time.time()", "update parser_update = sub_parsers.add_parser('update') parser_update.add_argument('id', type=int) parser_update.add_argument('attr', type=str, choices=['name', 'phone', 'local', 'domain', 'passwd'])", "if not rows: print(\"Given seller ID doesn't exist.\") return for row in rows:", "parser_update.add_argument('attr', type=str, choices=['name', 'phone', 'local', 'domain', 'passwd']) parser_update.add_argument('data', type=str) def int_check(text): try: int(text);", "print(sql) cur.execute(sql) conn.commit() except Exception as err: print(err) conn.rollback() print(\"modify_seller_info\") if __name__ ==", "return False def show_seller_from_table(row): print(\"Name: {name}\".format(name = row[1])) print(\"Phone Number: {phone}\".format(phone = row[2]))", "= \"UPDATE seller \" \\ \"SET {attr} = \\'{data}\\' \" \\ \"WHERE seller.id={id}\".format(attr=args.attr,", "parser = argparse.ArgumentParser() parsing_seller(parser) args = parser.parse_args() if args.function == 'info': show_seller_info(args) elif", "= row[2])) print(\"email: {local}@{domain}\".format(local=row[3], domain = row[4])) def 
show_seller_info(args): # TODO try: cur", "False def show_seller_from_table(row): print(\"Name: {name}\".format(name = row[1])) print(\"Phone Number: {phone}\".format(phone = row[2])) print(\"email:", "not rows: print(\"Given seller ID doesn't exist.\") return for row in rows: show_seller_from_table(row)", "parsing_seller(parser) args = parser.parse_args() if args.function == 'info': show_seller_info(args) elif args.function == 'update':", "elif args.function == 'update': modify_seller_info(args) else: parser.print_help() print(\"Running Time: \", end=\"\") print(time.time() -", "def parsing_seller(parser:argparse.ArgumentParser): sub_parsers = parser.add_subparsers(dest='function') # info parser_info = sub_parsers.add_parser('info') parser_info.add_argument('id', type=int) #", "rows = cur.fetchall() if not rows: print(\"Given seller ID doesn't exist.\") return for", "rows: print(\"Given seller ID doesn't exist.\") return for row in rows: show_seller_from_table(row) except", "seller \" \\ \"SET {attr} = \\'{data}\\' \" \\ \"WHERE seller.id={id}\".format(attr=args.attr, data=args.data, id=int(args.id))", "argparse.ArgumentParser() parsing_seller(parser) args = parser.parse_args() if args.function == 'info': show_seller_info(args) elif args.function ==", "seller ID doesn't exist.\") return for row in rows: show_seller_from_table(row) except Exception as", "helpers.connection import conn def parsing_seller(parser:argparse.ArgumentParser): sub_parsers = parser.add_subparsers(dest='function') # info parser_info = sub_parsers.add_parser('info')", "= row[1])) print(\"Phone Number: {phone}\".format(phone = row[2])) print(\"email: {local}@{domain}\".format(local=row[3], domain = row[4])) def", "\"SELECT * FROM seller WHERE id={id};\".format(id=args.id) cur.execute(sql) rows = cur.fetchall() if not rows:", "show_seller_from_table(row) except Exception as err: print(err) def modify_seller_info(args): # TODO try: cur =", "= conn.cursor() sql = \"SELECT * FROM seller WHERE 
id={id};\".format(id=args.id) cur.execute(sql) rows =", "# info parser_info = sub_parsers.add_parser('info') parser_info.add_argument('id', type=int) # update parser_update = sub_parsers.add_parser('update') parser_update.add_argument('id',", "for row in rows: show_seller_from_table(row) except Exception as err: print(err) def modify_seller_info(args): #", "Exception as err: print(err) conn.rollback() print(\"modify_seller_info\") if __name__ == \"__main__\": start = time.time()", "<gh_stars>0 import time import argparse from helpers.connection import conn def parsing_seller(parser:argparse.ArgumentParser): sub_parsers =", "def int_check(text): try: int(text); return True except ValueError: return False def show_seller_from_table(row): print(\"Name:", "'domain', 'passwd']) parser_update.add_argument('data', type=str) def int_check(text): try: int(text); return True except ValueError: return", "data=args.data, id=int(args.id)) print(sql) cur.execute(sql) conn.commit() except Exception as err: print(err) conn.rollback() print(\"modify_seller_info\") if", "print(\"Phone Number: {phone}\".format(phone = row[2])) print(\"email: {local}@{domain}\".format(local=row[3], domain = row[4])) def show_seller_info(args): #", "as err: print(err) def modify_seller_info(args): # TODO try: cur = conn.cursor() sql =", "conn.cursor() sql = \"UPDATE seller \" \\ \"SET {attr} = \\'{data}\\' \" \\", "ValueError: return False def show_seller_from_table(row): print(\"Name: {name}\".format(name = row[1])) print(\"Phone Number: {phone}\".format(phone =", "err: print(err) def modify_seller_info(args): # TODO try: cur = conn.cursor() sql = \"UPDATE", "sub_parsers.add_parser('update') parser_update.add_argument('id', type=int) parser_update.add_argument('attr', type=str, choices=['name', 'phone', 'local', 'domain', 'passwd']) parser_update.add_argument('data', type=str) def", "seller WHERE id={id};\".format(id=args.id) cur.execute(sql) rows = cur.fetchall() if not rows: print(\"Given seller 
ID", "{local}@{domain}\".format(local=row[3], domain = row[4])) def show_seller_info(args): # TODO try: cur = conn.cursor() sql", "if args.function == 'info': show_seller_info(args) elif args.function == 'update': modify_seller_info(args) else: parser.print_help() print(\"Running", "\\ \"SET {attr} = \\'{data}\\' \" \\ \"WHERE seller.id={id}\".format(attr=args.attr, data=args.data, id=int(args.id)) print(sql) cur.execute(sql)", "'info': show_seller_info(args) elif args.function == 'update': modify_seller_info(args) else: parser.print_help() print(\"Running Time: \", end=\"\")", "try: cur = conn.cursor() sql = \"UPDATE seller \" \\ \"SET {attr} =", "except Exception as err: print(err) conn.rollback() print(\"modify_seller_info\") if __name__ == \"__main__\": start =", "time import argparse from helpers.connection import conn def parsing_seller(parser:argparse.ArgumentParser): sub_parsers = parser.add_subparsers(dest='function') #", "show_seller_from_table(row): print(\"Name: {name}\".format(name = row[1])) print(\"Phone Number: {phone}\".format(phone = row[2])) print(\"email: {local}@{domain}\".format(local=row[3], domain", "args = parser.parse_args() if args.function == 'info': show_seller_info(args) elif args.function == 'update': modify_seller_info(args)", "= \\'{data}\\' \" \\ \"WHERE seller.id={id}\".format(attr=args.attr, data=args.data, id=int(args.id)) print(sql) cur.execute(sql) conn.commit() except Exception", "doesn't exist.\") return for row in rows: show_seller_from_table(row) except Exception as err: print(err)", "domain = row[4])) def show_seller_info(args): # TODO try: cur = conn.cursor() sql =", "row[1])) print(\"Phone Number: {phone}\".format(phone = row[2])) print(\"email: {local}@{domain}\".format(local=row[3], domain = row[4])) def show_seller_info(args):", "cur.execute(sql) rows = cur.fetchall() if not rows: print(\"Given seller ID doesn't exist.\") return", "info parser_info = sub_parsers.add_parser('info') parser_info.add_argument('id', 
type=int) # update parser_update = sub_parsers.add_parser('update') parser_update.add_argument('id', type=int)" ]
[ "def resolve_typedefs(self, typedefs): self.key_type = self.key_type.resolve_typedefs(typedefs) self.value_type = self.value_type.resolve_typedefs(typedefs) return self @property def", "In other words: # x = IdlType('ByteString') # y = IdlType('ByteString') # x", "typedefs[base_type] if resolved_type.base_type in typedefs: raise ValueError(\"We can't typedef a typedef'ed type.\") #", "type of nullable type must not be a union type that ' 'has", "extended_attributes and inner_type.base_type not in ['DOMString', 'USVString']): raise ValueError( 'StringContext is only applicable", "idl_type in self.inner_type.idl_types(): yield idl_type ################################################################################ # IdlAnnotatedType ################################################################################ class IdlAnnotatedType(IdlTypeBase): \"\"\"IdlAnnoatedType represents", "def resolve_typedefs(self, typedefs): self.inner_type = self.inner_type.resolve_typedefs(typedefs) return self def idl_types(self): yield self yield", "getattr(self.inner_type, name) def __getstate__(self): return { 'inner_type': self.inner_type, 'extended_attributes': self.extended_attributes, } def __setstate__(self,", "# interface_name -> ancestors def inherits_interface(interface_name, ancestor_name): return (interface_name == ancestor_name or ancestor_name", "number of nullable types. 
http://heycam.github.io/webidl/#dfn-number-of-nullable-member-types \"\"\" count = 0 for member in self.member_types:", "'object': 'Object', } STRING_TYPES = frozenset([ # http://heycam.github.io/webidl/#es-interface-call (step 10.11) # (Interface object", "return { 'inner_type': self.inner_type, 'extended_attributes': self.extended_attributes, } def __setstate__(self, state): self.inner_type = state['inner_type']", "extended_attributes): raise ValueError( 'Extended attributes not applicable to types: %s' % self) if", "We cannot use a set directly because each member is an # IdlTypeBase-derived", "found in the LICENSE file. \"\"\"IDL type handling. Classes: IdlTypeBase IdlType IdlUnionType IdlArrayOrSequenceType", "% (annotation, str(self.inner_type)) def __getattr__(self, name): return getattr(self.inner_type, name) def __getstate__(self): return {", "self.base_type in BASIC_TYPES @property def is_callback_function(self): # pylint: disable=C0103 return self.base_type in IdlType.callback_functions", "frozenset([ # http://www.w3.org/TR/WebIDL/#dfn-integer-type 'byte', 'octet', 'short', 'unsigned short', # int and unsigned are", "def inherits_interface(interface_name, ancestor_name): return (interface_name == ancestor_name or ancestor_name in ancestors[interface_name]) def set_ancestors(new_ancestors):", "'UnsignedLong', 'long long': 'LongLong', 'unsigned long long': 'UnsignedLongLong', 'float': 'Float', 'unrestricted float': 'UnrestrictedFloat',", "return True class IdlFrozenArrayType(IdlArrayOrSequenceType): def __init__(self, element_type): super(IdlFrozenArrayType, self).__init__(element_type) def __str__(self): return 'FrozenArray<%s>'", "member.is_union_type: count += member.number_of_nullable_member_types return count @property def is_union_type(self): return True def single_matching_member_type(self,", "enum_type(self): return self.element_type.enum_type def idl_types(self): yield self for idl_type in self.element_type.idl_types(): yield idl_type", "base 
class, and # allows us to relay __getattr__ in IdlNullableType to the", "state['inner_type'] @property def is_nullable(self): return True @property def name(self): return self.inner_type.name + 'OrNull'", "LICENSE file. \"\"\"IDL type handling. Classes: IdlTypeBase IdlType IdlUnionType IdlArrayOrSequenceType IdlSequenceType IdlFrozenArrayType IdlNullableType", "# http://www.w3.org/TR/WebIDL/#dfn-integer-type 'byte', 'octet', 'short', 'unsigned short', # int and unsigned are not", "} def __setstate__(self, state): self.inner_type = state['inner_type'] @property def is_nullable(self): return True @property", "lambda member_type: member_type.is_sequence_type) @property def dictionary_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.is_dictionary) @property def", "in self.member_types: if member.is_nullable: member = member.inner_type if member.is_union_type: for inner_member in member.flattened_member_types:", "yield idl_type class IdlSequenceType(IdlArrayOrSequenceType): def __init__(self, element_type): super(IdlSequenceType, self).__init__(element_type) def __str__(self): return 'sequence<%s>'", "cannot use a set directly because each member is an # IdlTypeBase-derived class,", "member_type.is_enum) ) @property def numeric_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.is_numeric_type) @property def boolean_member_type(self):", "IdlType.dictionaries @property def is_enum(self): # FIXME: add an IdlEnumType class and a resolve_enums", "be a union type that ' 'itself includes a nullable type.') if any(member.is_dictionary", "self.base_type in IdlType.callback_functions @property def is_custom_callback_function(self): entry = IdlType.callback_functions.get(self.base_type) callback_function = entry.get('callback_function') if", "if inner_type.name == 'Promise': raise ValueError( 'Inner type of nullable type must not", "ancestors def inherits_interface(interface_name, ancestor_name): 
return (interface_name == ancestor_name or ancestor_name in ancestors[interface_name]) def", "frozenset([ 'AllowShared', 'Clamp', 'EnforceRange', 'StringContext', 'TreatNullAs', ]) ################################################################################ # Inheritance ################################################################################ ancestors =", "side, # which matches the JavaScript 'null' in the enum parsing code. inner_values", "self.base_type = state['base_type'] @property def is_basic_type(self): return self.base_type in BASIC_TYPES @property def is_callback_function(self):", "default properties in the base class, and # allows us to relay __getattr__", "= [ member_type.resolve_typedefs(typedefs) for member_type in self.member_types ] return self def idl_types(self): yield", "because we store them in interfaces_info. \"\"\" from collections import defaultdict ################################################################################ #", "'inner_type': self.inner_type, } def __setstate__(self, state): self.inner_type = state['inner_type'] @property def is_nullable(self): return", "idl_type in self.value_type.idl_types(): yield idl_type def resolve_typedefs(self, typedefs): self.key_type = self.key_type.resolve_typedefs(typedefs) self.value_type =", "http://heycam.github.io/webidl/#idl-types 'DOMString', 'ByteString', 'USVString', # http://heycam.github.io/webidl/#idl-types 'void', ])) TYPE_NAMES = { # http://heycam.github.io/webidl/#dfn-type-name", "True @property def name(self): return self.key_type.name + self.value_type.name + 'Record' ################################################################################ # IdlNullableType", "# IdlArrayOrSequenceType, IdlSequenceType, IdlFrozenArrayType ################################################################################ # TODO(bashi): Rename this like \"IdlArrayTypeBase\" or something.", "not be a promise.') if inner_type.is_nullable: raise ValueError( 'Inner type of nullable 
type", "== 'boolean') @property def sequence_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.is_sequence_type) @property def dictionary_member_type(self):", "signature. # return str(self.inner_type) + '?' return str(self.inner_type) def __getattr__(self, name): return getattr(self.inner_type,", "the enum parsing code. inner_values = self.inner_type.enum_values if inner_values: return [None] + inner_values", "type handling. Classes: IdlTypeBase IdlType IdlUnionType IdlArrayOrSequenceType IdlSequenceType IdlFrozenArrayType IdlNullableType IdlAnnotatedType IdlTypes are", "treating these # as orthogonal properties (via flags). callback_functions = {} callback_interfaces =", "@property def dictionary_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.is_dictionary) @property def as_union_type(self): # Note:", "and a resolve_enums step # at end of IdlDefinitions constructor return self.name in", "str(self.inner_type) def __getattr__(self, name): return getattr(self.inner_type, name) def __getstate__(self): return { 'inner_type': self.inner_type,", "= entry.get('callback_function') if not callback_function: return False return 'Custom' in callback_function.extended_attributes @property def", "the list of # enum values. This None value is converted to nullptr", "enum_type(self): return self.name if self.is_enum else None @property def is_integer_type(self): return self.base_type in", "is_unrestricted: self.base_type = 'unrestricted %s' % base_type else: self.base_type = base_type def __str__(self):", "Classes: IdlTypeBase IdlType IdlUnionType IdlArrayOrSequenceType IdlSequenceType IdlFrozenArrayType IdlNullableType IdlAnnotatedType IdlTypes are picklable because", "not be any.') if inner_type.name == 'Promise': raise ValueError( 'Inner type of nullable", "# For the case that the resolved type contains other typedef'ed # type(s).", "type with extended attributes. 
[Clamp], [EnforceRange], [StringContext], and [TreatNullAs] are applicable to types.", "is governed by a BSD-style license that can be # found in the", "callback_functions = {} callback_interfaces = set() dictionaries = set() enums = {} #", "not callback_function: return False return 'Custom' in callback_function.extended_attributes @property def is_callback_interface(self): return self.base_type", "in IdlType.callback_interfaces @property def is_dictionary(self): return self.base_type in IdlType.dictionaries @property def is_enum(self): #", "type. # http://www.w3.org/TR/WebIDL/#idl-types # http://www.w3.org/TR/WebIDL/#idl-interface # In C++ these are RefPtr types. return", "type is not the same as comparing their names. # In other words:", "if val is None else val)) for key, val in sorted(self.extended_attributes.items())) return self.inner_type.name", "in typedefs: raise ValueError(\"We can't typedef a typedef'ed type.\") # For the case", "for idl_type in self.element_type.idl_types(): yield idl_type class IdlSequenceType(IdlArrayOrSequenceType): def __init__(self, element_type): super(IdlSequenceType, self).__init__(element_type)", "__setstate__(self, state): self.inner_type = state['inner_type'] @property def is_nullable(self): return True @property def name(self):", "= state['inner_type'] @property def is_nullable(self): return True @property def name(self): return self.inner_type.name +", "if self.is_enum else None @property def is_integer_type(self): return self.base_type in INTEGER_TYPES @property def", "nullable types. 
http://heycam.github.io/webidl/#dfn-number-of-nullable-member-types \"\"\" count = 0 for member in self.member_types: if member.is_nullable:", "lambda member_type: member_type.is_dictionary) @property def as_union_type(self): # Note: Use this to \"look through\"", "__setstate__(self, state): self.base_type = state['base_type'] @property def is_basic_type(self): return self.base_type in BASIC_TYPES @property", "def name(self): return self.key_type.name + self.value_type.name + 'Record' ################################################################################ # IdlNullableType ################################################################################ #", "def resolve_typedefs(self, typedefs): self.element_type = self.element_type.resolve_typedefs(typedefs) return self @property def is_array_or_sequence_type(self): return True", "'Any': raise ValueError('Inner type of nullable type must not be any.') if inner_type.name", "an IdlEnumType class and a resolve_enums step # at end of IdlDefinitions constructor", "for member in inner_type.flattened_member_types): raise ValueError( 'Inner type of nullable type must not", "data types # http://heycam.github.io/webidl/#idl-types 'DOMString', 'ByteString', 'USVString', # http://heycam.github.io/webidl/#idl-types 'void', ])) TYPE_NAMES =", "len(matching_types) > 1: raise ValueError('%s is ambiguous.' % self.name) return matching_types[0] if matching_types", "list of # enum values. This None value is converted to nullptr on", "Chromium Authors. All rights reserved. # Use of this source code is governed", "must not be a nullable type.') if inner_type.is_union_type: if inner_type.number_of_nullable_member_types > 0: raise", "types. 
https://heycam.github.io/webidl/#dfn-flattened-union-member-types \"\"\" # We cannot use a set directly because each member", "in extended_attributes and inner_type.base_type not in ['DOMString', 'USVString']): raise ValueError( 'StringContext is only", "is_callback_function(self): # pylint: disable=C0103 return self.base_type in IdlType.callback_functions @property def is_custom_callback_function(self): entry =", "# same type is not the same as comparing their names. # In", "name) def __getstate__(self): return { 'inner_type': self.inner_type, 'extended_attributes': self.extended_attributes, } def __setstate__(self, state):", "member_type.resolve_typedefs(typedefs) for member_type in self.member_types ] return self def idl_types(self): yield self for", "be a nullable type.') if inner_type.is_union_type: if inner_type.number_of_nullable_member_types > 0: raise ValueError( 'Inner", "ValueError( 'Inner type of nullable type must not be a union type that", "return None def resolve_typedefs(self, typedefs): self.inner_type = self.inner_type.resolve_typedefs(typedefs) return self def idl_types(self): yield", "in self.member_types: for idl_type in member_type.idl_types(): yield idl_type ################################################################################ # IdlArrayOrSequenceType, IdlSequenceType, IdlFrozenArrayType", "def single_matching_member_type(self, predicate): matching_types = list(filter(predicate, self.flattened_member_types)) if len(matching_types) > 1: raise ValueError('%s", "IdlArrayOrSequenceType IdlSequenceType IdlFrozenArrayType IdlNullableType IdlAnnotatedType IdlTypes are picklable because we store them in", "__getstate__(self): return { 'element_type': self.element_type, } def __setstate__(self, state): self.element_type = state['element_type'] def", "Rename this like \"IdlArrayTypeBase\" or something. 
class IdlArrayOrSequenceType(IdlTypeBase): \"\"\"Base class for array-like types.\"\"\"", "return self.base_type in BASIC_TYPES @property def is_callback_function(self): # pylint: disable=C0103 return self.base_type in", "simplifying its signature. # return str(self.inner_type) + '?' return str(self.inner_type) def __getattr__(self, name):", "# handle the '?' in nullable types (passes nullability separately). # Update that", "= {} # name -> values def __init__(self, base_type, is_unrestricted=False): super(IdlType, self).__init__() if", "'Custom' in callback_function.extended_attributes @property def is_callback_interface(self): return self.base_type in IdlType.callback_interfaces @property def is_dictionary(self):", "not in ['DOMString', 'USVString']): raise ValueError( 'StringContext is only applicable to string types.')", "numeric_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.is_numeric_type) @property def boolean_member_type(self): return self.single_matching_member_type( lambda member_type:", "# Nullable enums are handled by preprending a None value to the list", "flattened member types. https://heycam.github.io/webidl/#dfn-flattened-union-member-types \"\"\" # We cannot use a set directly because", "# allows us to relay __getattr__ in IdlNullableType to the inner type. return", "are not IDL types 'long', 'unsigned long', 'long long', 'unsigned long long', ])", "allows us to relay __getattr__ in IdlNullableType to the inner type. return None", "self.inner_type.name + annotation def resolve_typedefs(self, typedefs): self.inner_type = self.inner_type.resolve_typedefs(typedefs) return self def idl_types(self):", "# |objects|. 
# We assume we can use two IDL objects of the", "STRING_TYPES = frozenset([ # http://heycam.github.io/webidl/#es-interface-call (step 10.11) # (Interface object [[Call]] method's string", "name(self): return self.element_type.name + 'Sequence' @property def is_sequence_type(self): return True class IdlFrozenArrayType(IdlArrayOrSequenceType): def", "self.single_matching_member_type( lambda member_type: member_type.is_numeric_type) @property def boolean_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.base_type ==", "( PRIMITIVE_TYPES | frozenset([ # Built-in, non-composite, non-object data types # http://heycam.github.io/webidl/#idl-types 'DOMString',", "(step 10.11) # (Interface object [[Call]] method's string types.) 'String', 'ByteString', 'USVString', ])", "IdlType(IdlTypeBase): # FIXME: incorporate Nullable, etc. # to support types like short?[] vs.", "x == y # False # x.name == y.name # True # |flattened_members|'s", "objects of the same type interchangeably. flattened_members = {} for member in self.member_types:", "# http://heycam.github.io/webidl/#dfn-type-name 'any': 'Any', 'boolean': 'Boolean', 'byte': 'Byte', 'octet': 'Octet', 'short': 'Short', 'unsigned", "None value to the list of # enum values. This None value is", "another type is an interface type. 
# http://www.w3.org/TR/WebIDL/#idl-types # http://www.w3.org/TR/WebIDL/#idl-interface # In C++", "################################################################################ # Inheritance ################################################################################ ancestors = defaultdict(list) # interface_name -> ancestors def inherits_interface(interface_name,", "predicate): matching_types = list(filter(predicate, self.flattened_member_types)) if len(matching_types) > 1: raise ValueError('%s is ambiguous.'", "= defaultdict(list) # interface_name -> ancestors def inherits_interface(interface_name, ancestor_name): return (interface_name == ancestor_name", "IdlEnumType class and a resolve_enums step # at end of IdlDefinitions constructor return", "(analogous to Jinja variables). # This allows us to not define default properties", "flattened_members[member.name] = member return set(flattened_members.values()) @property def number_of_nullable_member_types(self): \"\"\"Returns the union's number of", "self.name in STRING_TYPES @property def name(self): \"\"\"Return type name http://heycam.github.io/webidl/#dfn-type-name \"\"\" base_type =", "resolve_typedefs(self, typedefs): self.inner_type = self.inner_type.resolve_typedefs(typedefs) return self def idl_types(self): yield self for idl_type", "return self.name in STRING_TYPES @property def name(self): \"\"\"Return type name http://heycam.github.io/webidl/#dfn-type-name \"\"\" base_type", "ancestors = defaultdict(list) # interface_name -> ancestors def inherits_interface(interface_name, ancestor_name): return (interface_name ==", "+ val)) for key, val in self.extended_attributes.items()) return '[%s] %s' % (annotation, str(self.inner_type))", "@property def has_string_context(self): return 'StringContext' in self.extended_attributes @property def name(self): annotation = ''.join(", "license that can be # found in the LICENSE file. \"\"\"IDL type handling.", "method's string types.) 
'String', 'ByteString', 'USVString', ]) EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES = frozenset([ 'AllowShared', 'Clamp', 'EnforceRange',", "> 1: raise ValueError('%s is ambiguous.' % self.name) return matching_types[0] if matching_types else", "IdlUnionType(IdlTypeBase): # http://heycam.github.io/webidl/#idl-union # IdlUnionType has __hash__() and __eq__() methods because they are", "# This allows us to not define default properties in the base class,", "val in sorted(self.extended_attributes.items())) return self.inner_type.name + annotation def resolve_typedefs(self, typedefs): self.inner_type = self.inner_type.resolve_typedefs(typedefs)", "base_type def __str__(self): return self.base_type def __getstate__(self): return { 'base_type': self.base_type, } def", "of # enum values. This None value is converted to nullptr on the", "'Boolean', 'byte': 'Byte', 'octet': 'Octet', 'short': 'Short', 'unsigned short': 'UnsignedShort', 'long': 'Long', 'unsigned", "for key, val in sorted(self.extended_attributes.items())) return self.inner_type.name + annotation def resolve_typedefs(self, typedefs): self.inner_type", "INTEGER_TYPES @property def is_void(self): return self.base_type == 'void' @property def is_numeric_type(self): return self.base_type", "return self def idl_types(self): yield self for member_type in self.member_types: for idl_type in", "IdlSequenceType, IdlFrozenArrayType ################################################################################ # TODO(bashi): Rename this like \"IdlArrayTypeBase\" or something. class IdlArrayOrSequenceType(IdlTypeBase):", "'Float', 'unrestricted float': 'UnrestrictedFloat', 'double': 'Double', 'unrestricted double': 'UnrestrictedDouble', 'DOMString': 'String', 'ByteString': 'ByteString',", "string types.) 
'String', 'ByteString', 'USVString', ]) EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES = frozenset([ 'AllowShared', 'Clamp', 'EnforceRange', 'StringContext',", "member.is_nullable: member = member.inner_type if member.is_union_type: for inner_member in member.flattened_member_types: flattened_members[inner_member.name] = inner_member", "= base_type def __str__(self): return self.base_type def __getstate__(self): return { 'base_type': self.base_type, }", "yield idl_type ################################################################################ # IdlAnnotatedType ################################################################################ class IdlAnnotatedType(IdlTypeBase): \"\"\"IdlAnnoatedType represents an IDL type", "in STRING_TYPES @property def name(self): \"\"\"Return type name http://heycam.github.io/webidl/#dfn-type-name \"\"\" base_type = self.base_type", "self.single_matching_member_type( lambda member_type: (member_type.is_string_type or member_type.is_enum) ) @property def numeric_member_type(self): return self.single_matching_member_type( lambda", "y = IdlType('ByteString') # x == y # False # x.name == y.name", "the union's flattened member types. 
https://heycam.github.io/webidl/#dfn-flattened-union-member-types \"\"\" # We cannot use a set", "# http://heycam.github.io/webidl/#idl-union # IdlUnionType has __hash__() and __eq__() methods because they are stored", "def is_numeric_type(self): return self.base_type in NUMERIC_TYPES @property def is_primitive_type(self): return self.base_type in PRIMITIVE_TYPES", "@property def enum_type(self): return self.element_type.enum_type def idl_types(self): yield self for idl_type in self.element_type.idl_types():", "\"\"\" def __str__(self): raise NotImplementedError('__str__() should be defined in subclasses') def __getattr__(self, name):", "' 'has a dictionary type as its members.') self.inner_type = inner_type def __str__(self):", "self.value_type, } def __setstate__(self, state): self.key_type = state['key_type'] self.value_type = state['value_type'] def idl_types(self):", "(via flags). callback_functions = {} callback_interfaces = set() dictionaries = set() enums =", "IdlTypeBase(object): \"\"\"Base class for IdlType, IdlUnionType, IdlArrayOrSequenceType and IdlNullableType. 
\"\"\" def __str__(self): raise", "is None else val)) for key, val in sorted(self.extended_attributes.items())) return self.inner_type.name + annotation", "+ ('' if val is None else val)) for key, val in sorted(self.extended_attributes.items()))", "is converted to nullptr on the C++ side, # which matches the JavaScript", "PRIMITIVE_TYPES = (frozenset(['boolean']) | NUMERIC_TYPES) BASIC_TYPES = ( PRIMITIVE_TYPES | frozenset([ # Built-in,", "matching_types else None @property def string_member_type(self): return self.single_matching_member_type( lambda member_type: (member_type.is_string_type or member_type.is_enum)", "True # |flattened_members|'s keys are type names, the values are type # |objects|.", "self.name == 'Any' or self.name == 'Object' or self.name == 'Promise' ) #", "== y.name # True # |flattened_members|'s keys are type names, the values are", "if member.is_nullable: count += 1 member = member.inner_type if member.is_union_type: count += member.number_of_nullable_member_types", "class IdlRecordType(IdlTypeBase): def __init__(self, key_type, value_type): super(IdlRecordType, self).__init__() self.key_type = key_type self.value_type =", "typedefs: resolved_type = typedefs[base_type] if resolved_type.base_type in typedefs: raise ValueError(\"We can't typedef a", "def set_callback_functions(cls, new_callback_functions): cls.callback_functions.update(new_callback_functions) @classmethod def set_callback_interfaces(cls, new_callback_interfaces): cls.callback_interfaces.update(new_callback_interfaces) @classmethod def set_dictionaries(cls, new_dictionaries):", "'void' @property def is_numeric_type(self): return self.base_type in NUMERIC_TYPES @property def is_primitive_type(self): return self.base_type", "idl_type ################################################################################ # IdlArrayOrSequenceType, IdlSequenceType, IdlFrozenArrayType ################################################################################ # TODO(bashi): Rename this 
like \"IdlArrayTypeBase\"", "{ 'member_types': self.member_types, } def __setstate__(self, state): self.member_types = state['member_types'] @property def flattened_member_types(self):", "not the same as comparing their names. # In other words: # x", "} def __setstate__(self, state): self.inner_type = state['inner_type'] self.extended_attributes = state['extended_attributes'] @property def is_annotated_type(self):", "'unsigned long': 'UnsignedLong', 'long long': 'LongLong', 'unsigned long long': 'UnsignedLongLong', 'float': 'Float', 'unrestricted", "def __getstate__(self): return { 'inner_type': self.inner_type, 'extended_attributes': self.extended_attributes, } def __setstate__(self, state): self.inner_type", "= inner_member else: flattened_members[member.name] = member return set(flattened_members.values()) @property def number_of_nullable_member_types(self): \"\"\"Returns the", "short', # int and unsigned are not IDL types 'long', 'unsigned long', 'long", "is_numeric_type(self): return self.base_type in NUMERIC_TYPES @property def is_primitive_type(self): return self.base_type in PRIMITIVE_TYPES @property", "@property def enum_values(self): # Nullable enums are handled by preprending a None value", "its signature. # return str(self.inner_type) + '?' 
return str(self.inner_type) def __getattr__(self, name): return", "'AllowShared', 'Clamp', 'EnforceRange', 'StringContext', 'TreatNullAs', ]) ################################################################################ # Inheritance ################################################################################ ancestors = defaultdict(list)", "return self.element_type.enum_type def idl_types(self): yield self for idl_type in self.element_type.idl_types(): yield idl_type class", "__init__(self, element_type): super(IdlFrozenArrayType, self).__init__(element_type) def __str__(self): return 'FrozenArray<%s>' % self.element_type @property def name(self):", "'Inner type of nullable type must not be a union type that '", "'UnsignedShort', 'long': 'Long', 'unsigned long': 'UnsignedLong', 'long long': 'LongLong', 'unsigned long long': 'UnsignedLongLong',", "'value_type': self.value_type, } def __setstate__(self, state): self.key_type = state['key_type'] self.value_type = state['value_type'] def", "IdlNullableType wrapper. return self @property def name(self): \"\"\"Return type name (or inner type", "= ''.join( (key + ('' if val is None else val)) for key,", "self.base_type if base_type in typedefs: resolved_type = typedefs[base_type] if resolved_type.base_type in typedefs: raise", "('' if val is None else '=' + val)) for key, val in", "IdlDefinitions constructor return self.name in IdlType.enums @property def enum_values(self): return IdlType.enums.get(self.name) @property def", "return set(flattened_members.values()) @property def number_of_nullable_member_types(self): \"\"\"Returns the union's number of nullable types. http://heycam.github.io/webidl/#dfn-number-of-nullable-member-types", "that the resolved type contains other typedef'ed # type(s). 
return resolved_type.resolve_typedefs(typedefs) return self", "http://www.w3.org/TR/WebIDL/#dfn-numeric-type 'float', 'unrestricted float', 'double', 'unrestricted double', ])) # http://www.w3.org/TR/WebIDL/#dfn-primitive-type PRIMITIVE_TYPES = (frozenset(['boolean'])", "FIXME: incorporate Nullable, etc. # to support types like short?[] vs. short[]?, instead", "for member_type in self.member_types) def resolve_typedefs(self, typedefs): self.member_types = [ member_type.resolve_typedefs(typedefs) for member_type", "@property def is_nullable(self): return True @property def name(self): return self.inner_type.name + 'OrNull' @property", "nullable type must not be a promise.') if inner_type.is_nullable: raise ValueError( 'Inner type", "is_integer_type(self): return self.base_type in INTEGER_TYPES @property def is_void(self): return self.base_type == 'void' @property", "class, and # allows us to relay __getattr__ in IdlNullableType to the inner", "'sequence<%s>' % self.element_type @property def name(self): return self.element_type.name + 'Sequence' @property def is_sequence_type(self):", "in extended_attributes): raise ValueError( 'Extended attributes not applicable to types: %s' % self)", "is_frozen_array(self): return True ################################################################################ # IdlRecordType ################################################################################ class IdlRecordType(IdlTypeBase): def __init__(self, key_type, value_type):", "val)) for key, val in self.extended_attributes.items()) return '[%s] %s' % (annotation, str(self.inner_type)) def", "ValueError('%s is ambiguous.' 
% self.name) return matching_types[0] if matching_types else None @property def", "import defaultdict ################################################################################ # IDL types ################################################################################ INTEGER_TYPES = frozenset([ # http://www.w3.org/TR/WebIDL/#dfn-integer-type 'byte',", "__str__(self): raise NotImplementedError('__str__() should be defined in subclasses') def __getattr__(self, name): # Default", "in the LICENSE file. \"\"\"IDL type handling. Classes: IdlTypeBase IdlType IdlUnionType IdlArrayOrSequenceType IdlSequenceType", "Nullable, etc. # to support types like short?[] vs. short[]?, instead of treating", "@classmethod def set_callback_interfaces(cls, new_callback_interfaces): cls.callback_interfaces.update(new_callback_interfaces) @classmethod def set_dictionaries(cls, new_dictionaries): cls.dictionaries.update(new_dictionaries) @classmethod def set_enums(cls,", "state): self.base_type = state['base_type'] @property def is_basic_type(self): return self.base_type in BASIC_TYPES @property def", "__str__(self): return 'record<%s, %s>' % (self.key_type, self.value_type) def __getstate__(self): return { 'key_type': self.key_type,", "ancestor_name in ancestors[interface_name]) def set_ancestors(new_ancestors): ancestors.update(new_ancestors) class IdlTypeBase(object): \"\"\"Base class for IdlType, IdlUnionType,", "__init__(self, element_type): super(IdlArrayOrSequenceType, self).__init__() self.element_type = element_type def __getstate__(self): return { 'element_type': self.element_type,", "RefPtr types. return not (self.is_basic_type or self.is_callback_function or self.is_dictionary or self.is_enum or self.name", "IdlType, IdlUnionType, IdlArrayOrSequenceType and IdlNullableType. 
\"\"\" def __str__(self): raise NotImplementedError('__str__() should be defined", "http://www.w3.org/TR/WebIDL/#dfn-integer-type 'byte', 'octet', 'short', 'unsigned short', # int and unsigned are not IDL", "return self.base_type in IdlType.callback_functions @property def is_custom_callback_function(self): entry = IdlType.callback_functions.get(self.base_type) callback_function = entry.get('callback_function')", "################################################################################ # IDL types ################################################################################ INTEGER_TYPES = frozenset([ # http://www.w3.org/TR/WebIDL/#dfn-integer-type 'byte', 'octet', 'short',", "'TreatNullAs', ]) ################################################################################ # Inheritance ################################################################################ ancestors = defaultdict(list) # interface_name -> ancestors", "self.single_matching_member_type( lambda member_type: member_type.is_dictionary) @property def as_union_type(self): # Note: Use this to \"look", "type that ' 'has a dictionary type as its members.') self.inner_type = inner_type", "[TreatNullAs] are applicable to types. https://heycam.github.io/webidl/#idl-annotated-types \"\"\" def __init__(self, inner_type, extended_attributes): super(IdlAnnotatedType, self).__init__()", "to the inner type. 
return None def resolve_typedefs(self, typedefs): raise NotImplementedError( 'resolve_typedefs should", "nullable type must not be a nullable type.') if inner_type.is_union_type: if inner_type.number_of_nullable_member_types >", "self.inner_type = state['inner_type'] @property def is_nullable(self): return True @property def name(self): return self.inner_type.name", "self for idl_type in self.key_type.idl_types(): yield idl_type for idl_type in self.value_type.idl_types(): yield idl_type", "http://heycam.github.io/webidl/#idl-types 'void', ])) TYPE_NAMES = { # http://heycam.github.io/webidl/#dfn-type-name 'any': 'Any', 'boolean': 'Boolean', 'byte':", "def enum_values(self): # Nullable enums are handled by preprending a None value to", "yield idl_type def resolve_typedefs(self, typedefs): self.key_type = self.key_type.resolve_typedefs(typedefs) self.value_type = self.value_type.resolve_typedefs(typedefs) return self", "+ ')' def __hash__(self): return hash(self.name) def __eq__(self, rhs): return self.name == rhs.name", "end of IdlDefinitions constructor return self.name in IdlType.enums @property def enum_values(self): return IdlType.enums.get(self.name)", "# http://www.w3.org/TR/WebIDL/#idl-interface # In C++ these are RefPtr types. return not (self.is_basic_type or", "as its members.') self.inner_type = inner_type def __str__(self): # FIXME: Dictionary::ConversionContext::setConversionType can't #", "# FIXME: incorporate Nullable, etc. # to support types like short?[] vs. 
short[]?,", "# IdlAnnotatedType ################################################################################ class IdlAnnotatedType(IdlTypeBase): \"\"\"IdlAnnoatedType represents an IDL type with extended attributes.", "'LongLong', 'unsigned long long': 'UnsignedLongLong', 'float': 'Float', 'unrestricted float': 'UnrestrictedFloat', 'double': 'Double', 'unrestricted", "yield self for idl_type in self.key_type.idl_types(): yield idl_type for idl_type in self.value_type.idl_types(): yield", "through\" a possible IdlNullableType wrapper. return self @property def name(self): \"\"\"Return type name", "{ 'base_type': self.base_type, } def __setstate__(self, state): self.base_type = state['base_type'] @property def is_basic_type(self):", "self.base_type == 'void' @property def is_numeric_type(self): return self.base_type in NUMERIC_TYPES @property def is_primitive_type(self):", "if inner_type.name == 'Any': raise ValueError('Inner type of nullable type must not be", "cls.dictionaries.update(new_dictionaries) @classmethod def set_enums(cls, new_enums): cls.enums.update(new_enums) def resolve_typedefs(self, typedefs): base_type = self.base_type if", "self.member_types: if member.is_nullable: count += 1 member = member.inner_type if member.is_union_type: count +=", "to Jinja variables). # This allows us to not define default properties in", "For the case that the resolved type contains other typedef'ed # type(s). return", "'String', 'ByteString', 'USVString', ]) EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES = frozenset([ 'AllowShared', 'Clamp', 'EnforceRange', 'StringContext', 'TreatNullAs', ])", "extended attributes. [Clamp], [EnforceRange], [StringContext], and [TreatNullAs] are applicable to types. 
https://heycam.github.io/webidl/#idl-annotated-types \"\"\"", "False return 'Custom' in callback_function.extended_attributes @property def is_callback_interface(self): return self.base_type in IdlType.callback_interfaces @property", "step # at end of IdlDefinitions constructor return self.name in IdlType.enums @property def", "# Anything that is not another type is an interface type. # http://www.w3.org/TR/WebIDL/#idl-types", "def idl_types(self): yield self for idl_type in self.inner_type.idl_types(): yield idl_type ################################################################################ # IdlAnnotatedType", "state['extended_attributes'] @property def is_annotated_type(self): return True @property def has_string_context(self): return 'StringContext' in self.extended_attributes", "# IdlUnionType has __hash__() and __eq__() methods because they are stored # in", "def is_sequence_type(self): return False @property def is_frozen_array(self): return False @property def enum_values(self): return", "return { 'base_type': self.base_type, } def __setstate__(self, state): self.base_type = state['base_type'] @property def", "1: raise ValueError('%s is ambiguous.' % self.name) return matching_types[0] if matching_types else None", "IdlNullableType(IdlTypeBase): def __init__(self, inner_type): super(IdlNullableType, self).__init__() if inner_type.name == 'Any': raise ValueError('Inner type", "lambda member_type: member_type.is_numeric_type) @property def boolean_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.base_type == 'boolean')", "union's flattened member types. 
https://heycam.github.io/webidl/#dfn-flattened-union-member-types \"\"\" # We cannot use a set directly", "in self.element_type.idl_types(): yield idl_type class IdlSequenceType(IdlArrayOrSequenceType): def __init__(self, element_type): super(IdlSequenceType, self).__init__(element_type) def __str__(self):", "yield idl_type ################################################################################ # IdlArrayOrSequenceType, IdlSequenceType, IdlFrozenArrayType ################################################################################ # TODO(bashi): Rename this like", "'float', 'unrestricted float', 'double', 'unrestricted double', ])) # http://www.w3.org/TR/WebIDL/#dfn-primitive-type PRIMITIVE_TYPES = (frozenset(['boolean']) |", "ValueError(\"We can't typedef a typedef'ed type.\") # For the case that the resolved", "All rights reserved. # Use of this source code is governed by a", "float', 'double', 'unrestricted double', ])) # http://www.w3.org/TR/WebIDL/#dfn-primitive-type PRIMITIVE_TYPES = (frozenset(['boolean']) | NUMERIC_TYPES) BASIC_TYPES", "for key in extended_attributes): raise ValueError( 'Extended attributes not applicable to types: %s'", "+ 'OrNull' @property def enum_values(self): # Nullable enums are handled by preprending a", "idl_type in member_type.idl_types(): yield idl_type ################################################################################ # IdlArrayOrSequenceType, IdlSequenceType, IdlFrozenArrayType ################################################################################ # TODO(bashi):", "http://www.w3.org/TR/WebIDL/#idl-interface # In C++ these are RefPtr types. return not (self.is_basic_type or self.is_callback_function", "def __getstate__(self): return { 'element_type': self.element_type, } def __setstate__(self, state): self.element_type = state['element_type']", "# Copyright 2014 The Chromium Authors. All rights reserved. 
# Use of this", "# x = IdlType('ByteString') # y = IdlType('ByteString') # x == y #", "reserved. # Use of this source code is governed by a BSD-style license", "if inner_type.is_union_type: if inner_type.number_of_nullable_member_types > 0: raise ValueError( 'Inner type of nullable type", "that ' 'itself includes a nullable type.') if any(member.is_dictionary for member in inner_type.flattened_member_types):", "| frozenset([ # http://www.w3.org/TR/WebIDL/#dfn-numeric-type 'float', 'unrestricted float', 'double', 'unrestricted double', ])) # http://www.w3.org/TR/WebIDL/#dfn-primitive-type", "STRING_TYPES @property def name(self): \"\"\"Return type name http://heycam.github.io/webidl/#dfn-type-name \"\"\" base_type = self.base_type return", "idl_type in self.element_type.idl_types(): yield idl_type class IdlSequenceType(IdlArrayOrSequenceType): def __init__(self, element_type): super(IdlSequenceType, self).__init__(element_type) def", "be # found in the LICENSE file. \"\"\"IDL type handling. Classes: IdlTypeBase IdlType", "################################################################################ # IdlArrayOrSequenceType, IdlSequenceType, IdlFrozenArrayType ################################################################################ # TODO(bashi): Rename this like \"IdlArrayTypeBase\" or", "self.inner_type, 'extended_attributes': self.extended_attributes, } def __setstate__(self, state): self.inner_type = state['inner_type'] self.extended_attributes = state['extended_attributes']", "== 'void' @property def is_numeric_type(self): return self.base_type in NUMERIC_TYPES @property def is_primitive_type(self): return", "the set of the union's flattened member types. 
https://heycam.github.io/webidl/#dfn-flattened-union-member-types \"\"\" # We cannot", "self.member_types) def resolve_typedefs(self, typedefs): self.member_types = [ member_type.resolve_typedefs(typedefs) for member_type in self.member_types ]", "(self.key_type, self.value_type) def __getstate__(self): return { 'key_type': self.key_type, 'value_type': self.value_type, } def __setstate__(self,", "self.member_types = member_types def __str__(self): return '(' + ' or '.join( str(member_type) for", "% self.name) return matching_types[0] if matching_types else None @property def string_member_type(self): return self.single_matching_member_type(", "def enum_type(self): return self.element_type.enum_type def idl_types(self): yield self for idl_type in self.element_type.idl_types(): yield", "flattened_member_types(self): \"\"\"Returns the set of the union's flattened member types. https://heycam.github.io/webidl/#dfn-flattened-union-member-types \"\"\" #", "a BSD-style license that can be # found in the LICENSE file. \"\"\"IDL", "return self.base_type in IdlType.callback_interfaces @property def is_dictionary(self): return self.base_type in IdlType.dictionaries @property def", "for IdlType, IdlUnionType, IdlArrayOrSequenceType and IdlNullableType. \"\"\" def __str__(self): raise NotImplementedError('__str__() should be", "and IdlNullableType. \"\"\" def __str__(self): raise NotImplementedError('__str__() should be defined in subclasses') def", "and [TreatNullAs] are applicable to types. https://heycam.github.io/webidl/#idl-annotated-types \"\"\" def __init__(self, inner_type, extended_attributes): super(IdlAnnotatedType,", "IdlAnnotatedType(IdlTypeBase): \"\"\"IdlAnnoatedType represents an IDL type with extended attributes. 
[Clamp], [EnforceRange], [StringContext], and", "True def single_matching_member_type(self, predicate): matching_types = list(filter(predicate, self.flattened_member_types)) if len(matching_types) > 1: raise", "return matching_types[0] if matching_types else None @property def string_member_type(self): return self.single_matching_member_type( lambda member_type:", "super(IdlNullableType, self).__init__() if inner_type.name == 'Any': raise ValueError('Inner type of nullable type must", "of the same type interchangeably. flattened_members = {} for member in self.member_types: if", "])) # http://www.w3.org/TR/WebIDL/#dfn-primitive-type PRIMITIVE_TYPES = (frozenset(['boolean']) | NUMERIC_TYPES) BASIC_TYPES = ( PRIMITIVE_TYPES |", "IdlTypeBase-derived class, and comparing two objects of the # same type is not", "in IdlType.dictionaries @property def is_enum(self): # FIXME: add an IdlEnumType class and a", "@property def is_string_type(self): return self.name in STRING_TYPES @property def name(self): \"\"\"Return type name", "state['member_types'] @property def flattened_member_types(self): \"\"\"Returns the set of the union's flattened member types.", "long': 'LongLong', 'unsigned long long': 'UnsignedLongLong', 'float': 'Float', 'unrestricted float': 'UnrestrictedFloat', 'double': 'Double',", "element_type def __getstate__(self): return { 'element_type': self.element_type, } def __setstate__(self, state): self.element_type =", "source code is governed by a BSD-style license that can be # found", "(or inner type name if nullable) http://heycam.github.io/webidl/#dfn-type-name \"\"\" return 'Or'.join(member_type.name for member_type in", "that function to handle nullability from the type name, # simplifying its signature.", "us to relay __getattr__ in IdlNullableType to the inner type. 
return None def", "is_enum(self): # FIXME: add an IdlEnumType class and a resolve_enums step # at", "in INTEGER_TYPES @property def is_void(self): return self.base_type == 'void' @property def is_numeric_type(self): return", "def __setstate__(self, state): self.inner_type = state['inner_type'] self.extended_attributes = state['extended_attributes'] @property def is_annotated_type(self): return", "@property def is_integer_type(self): return self.base_type in INTEGER_TYPES @property def is_void(self): return self.base_type ==", "def resolve_typedefs(self, typedefs): self.member_types = [ member_type.resolve_typedefs(typedefs) for member_type in self.member_types ] return", "def __str__(self): return 'record<%s, %s>' % (self.key_type, self.value_type) def __getstate__(self): return { 'key_type':", "def is_string_type(self): return self.name in STRING_TYPES @property def name(self): \"\"\"Return type name http://heycam.github.io/webidl/#dfn-type-name", "member_type: (member_type.is_string_type or member_type.is_enum) ) @property def numeric_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.is_numeric_type)", "non-composite, non-object data types # http://heycam.github.io/webidl/#idl-types 'DOMString', 'ByteString', 'USVString', # http://heycam.github.io/webidl/#idl-types 'void', ]))", "= extended_attributes if any(key not in EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES for key in extended_attributes): raise ValueError(", "################################################################################ ancestors = defaultdict(list) # interface_name -> ancestors def inherits_interface(interface_name, ancestor_name): return (interface_name", "return { 'key_type': self.key_type, 'value_type': self.value_type, } def __setstate__(self, state): self.key_type = state['key_type']", "set_callback_interfaces(cls, new_callback_interfaces): cls.callback_interfaces.update(new_callback_interfaces) @classmethod def set_dictionaries(cls, 
new_dictionaries): cls.dictionaries.update(new_dictionaries) @classmethod def set_enums(cls, new_enums): cls.enums.update(new_enums)", "'record<%s, %s>' % (self.key_type, self.value_type) def __getstate__(self): return { 'key_type': self.key_type, 'value_type': self.value_type,", "relay __getattr__ in IdlNullableType to the inner type. return None def resolve_typedefs(self, typedefs):", "def __hash__(self): return hash(self.name) def __eq__(self, rhs): return self.name == rhs.name def __getstate__(self):", "def number_of_nullable_member_types(self): \"\"\"Returns the union's number of nullable types. http://heycam.github.io/webidl/#dfn-number-of-nullable-member-types \"\"\" count =", "typedefs): raise NotImplementedError( 'resolve_typedefs should be defined in subclasses') def idl_types(self): \"\"\"A generator", "False # x.name == y.name # True # |flattened_members|'s keys are type names,", "return getattr(self.inner_type, name) def __getstate__(self): return { 'inner_type': self.inner_type, } def __setstate__(self, state):", "array-like types.\"\"\" def __init__(self, element_type): super(IdlArrayOrSequenceType, self).__init__() self.element_type = element_type def __getstate__(self): return", "'unrestricted float', 'double', 'unrestricted double', ])) # http://www.w3.org/TR/WebIDL/#dfn-primitive-type PRIMITIVE_TYPES = (frozenset(['boolean']) | NUMERIC_TYPES)", "-> ancestors def inherits_interface(interface_name, ancestor_name): return (interface_name == ancestor_name or ancestor_name in ancestors[interface_name])", "return self.key_type.name + self.value_type.name + 'Record' ################################################################################ # IdlNullableType ################################################################################ # https://heycam.github.io/webidl/#idl-nullable-type class", "types (passes nullability separately). 
# Update that function to handle nullability from the", "type must not be any.') if inner_type.name == 'Promise': raise ValueError( 'Inner type", "ancestors[interface_name]) def set_ancestors(new_ancestors): ancestors.update(new_ancestors) class IdlTypeBase(object): \"\"\"Base class for IdlType, IdlUnionType, IdlArrayOrSequenceType and", "resolve_typedefs(self, typedefs): self.key_type = self.key_type.resolve_typedefs(typedefs) self.value_type = self.value_type.resolve_typedefs(typedefs) return self @property def is_record_type(self):", "(self.is_basic_type or self.is_callback_function or self.is_dictionary or self.is_enum or self.name == 'Any' or self.name", "must not be a promise.') if inner_type.is_nullable: raise ValueError( 'Inner type of nullable", "member_type.is_numeric_type) @property def boolean_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.base_type == 'boolean') @property def", "sequence_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.is_sequence_type) @property def dictionary_member_type(self): return self.single_matching_member_type( lambda member_type:", "IDL types ################################################################################ INTEGER_TYPES = frozenset([ # http://www.w3.org/TR/WebIDL/#dfn-integer-type 'byte', 'octet', 'short', 'unsigned short',", "in self.extended_attributes @property def name(self): annotation = ''.join( (key + ('' if val", "inner_type.name == 'Promise': raise ValueError( 'Inner type of nullable type must not be", "def string_member_type(self): return self.single_matching_member_type( lambda member_type: (member_type.is_string_type or member_type.is_enum) ) @property def numeric_member_type(self):", "x.name == y.name # True # |flattened_members|'s keys are type names, the values", "member_type in self.member_types: for idl_type in member_type.idl_types(): yield idl_type 
################################################################################ # IdlArrayOrSequenceType, IdlSequenceType,", "member in inner_type.flattened_member_types): raise ValueError( 'Inner type of nullable type must not be", "@property def is_record_type(self): return True @property def name(self): return self.key_type.name + self.value_type.name +", "@property def number_of_nullable_member_types(self): \"\"\"Returns the union's number of nullable types. http://heycam.github.io/webidl/#dfn-number-of-nullable-member-types \"\"\" count", "class IdlAnnotatedType(IdlTypeBase): \"\"\"IdlAnnoatedType represents an IDL type with extended attributes. [Clamp], [EnforceRange], [StringContext],", "other typedef'ed # type(s). return resolved_type.resolve_typedefs(typedefs) return self ################################################################################ # IdlUnionType ################################################################################ class", "name) def __getstate__(self): return { 'inner_type': self.inner_type, } def __setstate__(self, state): self.inner_type =", "the inner type. return None def resolve_typedefs(self, typedefs): raise NotImplementedError( 'resolve_typedefs should be", "PRIMITIVE_TYPES | frozenset([ # Built-in, non-composite, non-object data types # http://heycam.github.io/webidl/#idl-types 'DOMString', 'ByteString',", "must not be a union type that ' 'has a dictionary type as", "https://heycam.github.io/webidl/#idl-nullable-type class IdlNullableType(IdlTypeBase): def __init__(self, inner_type): super(IdlNullableType, self).__init__() if inner_type.name == 'Any': raise", "typedefs): self.inner_type = self.inner_type.resolve_typedefs(typedefs) return self def idl_types(self): yield self for idl_type in", "extended_attributes): super(IdlAnnotatedType, self).__init__() self.inner_type = inner_type self.extended_attributes = extended_attributes if any(key not in", "is not the same as comparing their names. 
# In other words: #", "{} for member in self.member_types: if member.is_nullable: member = member.inner_type if member.is_union_type: for", "# IdlRecordType ################################################################################ class IdlRecordType(IdlTypeBase): def __init__(self, key_type, value_type): super(IdlRecordType, self).__init__() self.key_type =", "of the # same type is not the same as comparing their names.", "same type interchangeably. flattened_members = {} for member in self.member_types: if member.is_nullable: member", "def __str__(self): return 'FrozenArray<%s>' % self.element_type @property def name(self): return self.element_type.name + 'Array'", "member.number_of_nullable_member_types return count @property def is_union_type(self): return True def single_matching_member_type(self, predicate): matching_types =", "def resolve_typedefs(self, typedefs): base_type = self.base_type if base_type in typedefs: resolved_type = typedefs[base_type]", "or self.name == 'Object' or self.name == 'Promise' ) # Promise will be", "'Promise' ) # Promise will be basic in future @property def is_string_type(self): return", "= IdlType('ByteString') # y = IdlType('ByteString') # x == y # False #", "__eq__(self, rhs): return self.name == rhs.name def __getstate__(self): return { 'member_types': self.member_types, }", "new_callback_interfaces): cls.callback_interfaces.update(new_callback_interfaces) @classmethod def set_dictionaries(cls, new_dictionaries): cls.dictionaries.update(new_dictionaries) @classmethod def set_enums(cls, new_enums): cls.enums.update(new_enums) def", "by a BSD-style license that can be # found in the LICENSE file.", "be defined in subclasses') def __getattr__(self, name): # Default undefined attributes to None", "{} # name -> values def __init__(self, base_type, is_unrestricted=False): super(IdlType, self).__init__() if is_unrestricted:", "handle nullability from the type name, # simplifying its signature. 
# return str(self.inner_type)", "def __str__(self): # FIXME: Dictionary::ConversionContext::setConversionType can't # handle the '?' in nullable types", "ancestor_name or ancestor_name in ancestors[interface_name]) def set_ancestors(new_ancestors): ancestors.update(new_ancestors) class IdlTypeBase(object): \"\"\"Base class for", "lambda member_type: (member_type.is_string_type or member_type.is_enum) ) @property def numeric_member_type(self): return self.single_matching_member_type( lambda member_type:", "name): # Default undefined attributes to None (analogous to Jinja variables). # This", "generator which yields IdlTypes which are referenced from |self|, including itself.\"\"\" yield self", "self @property def name(self): \"\"\"Return type name (or inner type name if nullable)", "long': 'UnsignedLong', 'long long': 'LongLong', 'unsigned long long': 'UnsignedLongLong', 'float': 'Float', 'unrestricted float':", "self for idl_type in self.inner_type.idl_types(): yield idl_type ################################################################################ # IdlAnnotatedType ################################################################################ class IdlAnnotatedType(IdlTypeBase):", "return self.base_type == 'void' @property def is_numeric_type(self): return self.base_type in NUMERIC_TYPES @property def", "inner_values return None def resolve_typedefs(self, typedefs): self.inner_type = self.inner_type.resolve_typedefs(typedefs) return self def idl_types(self):", "callback_function.extended_attributes @property def is_callback_interface(self): return self.base_type in IdlType.callback_interfaces @property def is_dictionary(self): return self.base_type", "separately). # Update that function to handle nullability from the type name, #", "member types. 
https://heycam.github.io/webidl/#dfn-flattened-union-member-types \"\"\" # We cannot use a set directly because each", "idl_type for idl_type in self.value_type.idl_types(): yield idl_type def resolve_typedefs(self, typedefs): self.key_type = self.key_type.resolve_typedefs(typedefs)", "self.element_type.idl_types(): yield idl_type class IdlSequenceType(IdlArrayOrSequenceType): def __init__(self, element_type): super(IdlSequenceType, self).__init__(element_type) def __str__(self): return", "IdlType.callback_functions @property def is_custom_callback_function(self): entry = IdlType.callback_functions.get(self.base_type) callback_function = entry.get('callback_function') if not callback_function:", ") # Promise will be basic in future @property def is_string_type(self): return self.name", "################################################################################ class IdlRecordType(IdlTypeBase): def __init__(self, key_type, value_type): super(IdlRecordType, self).__init__() self.key_type = key_type self.value_type", "'Octet', 'short': 'Short', 'unsigned short': 'UnsignedShort', 'long': 'Long', 'unsigned long': 'UnsignedLong', 'long long':", "resolved_type.resolve_typedefs(typedefs) return self ################################################################################ # IdlUnionType ################################################################################ class IdlUnionType(IdlTypeBase): # http://heycam.github.io/webidl/#idl-union # IdlUnionType", "\"\"\"A generator which yields IdlTypes which are referenced from |self|, including itself.\"\"\" yield", "self.key_type.resolve_typedefs(typedefs) self.value_type = self.value_type.resolve_typedefs(typedefs) return self @property def is_record_type(self): return True @property def", "for member in self.member_types: if member.is_nullable: member = member.inner_type if member.is_union_type: for inner_member", "# TODO(bashi): Rename this like \"IdlArrayTypeBase\" or something. 
class IdlArrayOrSequenceType(IdlTypeBase): \"\"\"Base class for", "True ################################################################################ # IdlRecordType ################################################################################ class IdlRecordType(IdlTypeBase): def __init__(self, key_type, value_type): super(IdlRecordType, self).__init__()", "\"\"\"Return type name http://heycam.github.io/webidl/#dfn-type-name \"\"\" base_type = self.base_type return TYPE_NAMES.get(base_type, base_type) @classmethod def", "'boolean': 'Boolean', 'byte': 'Byte', 'octet': 'Octet', 'short': 'Short', 'unsigned short': 'UnsignedShort', 'long': 'Long',", "member_type.base_type == 'boolean') @property def sequence_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.is_sequence_type) @property def", "'Array' @property def is_frozen_array(self): return True ################################################################################ # IdlRecordType ################################################################################ class IdlRecordType(IdlTypeBase): def", "and inner_type.base_type not in ['DOMString', 'USVString']): raise ValueError( 'StringContext is only applicable to", "http://www.w3.org/TR/WebIDL/#idl-types # http://www.w3.org/TR/WebIDL/#idl-interface # In C++ these are RefPtr types. 
return not (self.is_basic_type", "idl_type ################################################################################ # IdlAnnotatedType ################################################################################ class IdlAnnotatedType(IdlTypeBase): \"\"\"IdlAnnoatedType represents an IDL type with", "type must not be a nullable type.') if inner_type.is_union_type: if inner_type.number_of_nullable_member_types > 0:", "return self.single_matching_member_type( lambda member_type: member_type.is_numeric_type) @property def boolean_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.base_type", "self.key_type = state['key_type'] self.value_type = state['value_type'] def idl_types(self): yield self for idl_type in", "state): self.inner_type = state['inner_type'] self.extended_attributes = state['extended_attributes'] @property def is_annotated_type(self): return True @property", "Anything that is not another type is an interface type. # http://www.w3.org/TR/WebIDL/#idl-types #", "https://heycam.github.io/webidl/#dfn-flattened-union-member-types \"\"\" # We cannot use a set directly because each member is", ") @property def numeric_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.is_numeric_type) @property def boolean_member_type(self): return", "case that the resolved type contains other typedef'ed # type(s). return resolved_type.resolve_typedefs(typedefs) return", "self.base_type in IdlType.dictionaries @property def is_enum(self): # FIXME: add an IdlEnumType class and", "False @property def enum_values(self): return self.element_type.enum_values @property def enum_type(self): return self.element_type.enum_type def idl_types(self):", "list(filter(predicate, self.flattened_member_types)) if len(matching_types) > 1: raise ValueError('%s is ambiguous.' 
% self.name) return", "types ################################################################################ INTEGER_TYPES = frozenset([ # http://www.w3.org/TR/WebIDL/#dfn-integer-type 'byte', 'octet', 'short', 'unsigned short', #", "matching_types = list(filter(predicate, self.flattened_member_types)) if len(matching_types) > 1: raise ValueError('%s is ambiguous.' %", "idl_types(self): yield self for idl_type in self.element_type.idl_types(): yield idl_type class IdlSequenceType(IdlArrayOrSequenceType): def __init__(self,", "not applicable to types: %s' % self) if ('StringContext' in extended_attributes and inner_type.base_type", "rhs.name def __getstate__(self): return { 'member_types': self.member_types, } def __setstate__(self, state): self.member_types =", "if any(member.is_dictionary for member in inner_type.flattened_member_types): raise ValueError( 'Inner type of nullable type", "@property def enum_type(self): return self.name if self.is_enum else None @property def is_integer_type(self): return", "state['value_type'] def idl_types(self): yield self for idl_type in self.key_type.idl_types(): yield idl_type for idl_type", "return True @property def is_sequence_type(self): return False @property def is_frozen_array(self): return False @property", "def idl_types(self): \"\"\"A generator which yields IdlTypes which are referenced from |self|, including", "not IDL types 'long', 'unsigned long', 'long long', 'unsigned long long', ]) NUMERIC_TYPES", "to \"look through\" a possible IdlNullableType wrapper. 
return self @property def name(self): \"\"\"Return", "'long', 'unsigned long', 'long long', 'unsigned long long', ]) NUMERIC_TYPES = ( INTEGER_TYPES", "resolved_type.base_type in typedefs: raise ValueError(\"We can't typedef a typedef'ed type.\") # For the", "name): return getattr(self.inner_type, name) def __getstate__(self): return { 'inner_type': self.inner_type, } def __setstate__(self,", "is an # IdlTypeBase-derived class, and comparing two objects of the # same", "self.base_type = base_type def __str__(self): return self.base_type def __getstate__(self): return { 'base_type': self.base_type,", "wrapper. return self @property def name(self): \"\"\"Return type name (or inner type name", "IdlUnionType ################################################################################ class IdlUnionType(IdlTypeBase): # http://heycam.github.io/webidl/#idl-union # IdlUnionType has __hash__() and __eq__() methods", "return [None] + inner_values return None def resolve_typedefs(self, typedefs): self.inner_type = self.inner_type.resolve_typedefs(typedefs) return", "# Note: Use this to \"look through\" a possible IdlNullableType wrapper. 
return self", "__str__(self): return self.base_type def __getstate__(self): return { 'base_type': self.base_type, } def __setstate__(self, state):", "\"\"\" base_type = self.base_type return TYPE_NAMES.get(base_type, base_type) @classmethod def set_callback_functions(cls, new_callback_functions): cls.callback_functions.update(new_callback_functions) @classmethod", "self.member_types: for idl_type in member_type.idl_types(): yield idl_type ################################################################################ # IdlArrayOrSequenceType, IdlSequenceType, IdlFrozenArrayType ################################################################################", "basic in future @property def is_string_type(self): return self.name in STRING_TYPES @property def name(self):", "None @property def is_integer_type(self): return self.base_type in INTEGER_TYPES @property def is_void(self): return self.base_type", "################################################################################ class IdlUnionType(IdlTypeBase): # http://heycam.github.io/webidl/#idl-union # IdlUnionType has __hash__() and __eq__() methods because", "__str__(self): return 'sequence<%s>' % self.element_type @property def name(self): return self.element_type.name + 'Sequence' @property", "types # http://heycam.github.io/webidl/#idl-types 'DOMString', 'ByteString', 'USVString', # http://heycam.github.io/webidl/#idl-types 'void', ])) TYPE_NAMES = {", "nullable type must not be a union type that ' 'itself includes a", "key_type, value_type): super(IdlRecordType, self).__init__() self.key_type = key_type self.value_type = value_type def __str__(self): return", "orthogonal properties (via flags). callback_functions = {} callback_interfaces = set() dictionaries = set()", "types. 
return not (self.is_basic_type or self.is_callback_function or self.is_dictionary or self.is_enum or self.name ==", "def set_callback_interfaces(cls, new_callback_interfaces): cls.callback_interfaces.update(new_callback_interfaces) @classmethod def set_dictionaries(cls, new_dictionaries): cls.dictionaries.update(new_dictionaries) @classmethod def set_enums(cls, new_enums):", "of the union's flattened member types. https://heycam.github.io/webidl/#dfn-flattened-union-member-types \"\"\" # We cannot use a", "{} callback_interfaces = set() dictionaries = set() enums = {} # name ->", "if matching_types else None @property def string_member_type(self): return self.single_matching_member_type( lambda member_type: (member_type.is_string_type or", "= self.inner_type.resolve_typedefs(typedefs) return self def idl_types(self): yield self for idl_type in self.inner_type.idl_types(): yield", "int and unsigned are not IDL types 'long', 'unsigned long', 'long long', 'unsigned", "in typedefs: resolved_type = typedefs[base_type] if resolved_type.base_type in typedefs: raise ValueError(\"We can't typedef", "= set() dictionaries = set() enums = {} # name -> values def", "################################################################################ # IdlType ################################################################################ class IdlType(IdlTypeBase): # FIXME: incorporate Nullable, etc. # to", "are stored # in sets. 
def __init__(self, member_types): super(IdlUnionType, self).__init__() self.member_types = member_types", "%s' % self) if ('StringContext' in extended_attributes and inner_type.base_type not in ['DOMString', 'USVString']):", "for idl_type in self.key_type.idl_types(): yield idl_type for idl_type in self.value_type.idl_types(): yield idl_type def", "def __eq__(self, rhs): return self.name == rhs.name def __getstate__(self): return { 'member_types': self.member_types,", "return self def idl_types(self): yield self for idl_type in self.inner_type.idl_types(): yield idl_type ################################################################################", "self.key_type = key_type self.value_type = value_type def __str__(self): return 'record<%s, %s>' % (self.key_type,", "for member_type in self.member_types ] return self def idl_types(self): yield self for member_type", "################################################################################ # IdlAnnotatedType ################################################################################ class IdlAnnotatedType(IdlTypeBase): \"\"\"IdlAnnoatedType represents an IDL type with extended", "for idl_type in self.inner_type.idl_types(): yield idl_type ################################################################################ # IdlAnnotatedType ################################################################################ class IdlAnnotatedType(IdlTypeBase): \"\"\"IdlAnnoatedType", "\"\"\" # We cannot use a set directly because each member is an", "\"\"\" count = 0 for member in self.member_types: if member.is_nullable: count += 1", "self).__init__() self.inner_type = inner_type self.extended_attributes = extended_attributes if any(key not in EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES for", "INTEGER_TYPES = frozenset([ # http://www.w3.org/TR/WebIDL/#dfn-integer-type 'byte', 'octet', 'short', 'unsigned short', # int and", "def resolve_typedefs(self, typedefs): self.inner_type = 
self.inner_type.resolve_typedefs(typedefs) return self def idl_types(self): yield self for", "return True @property def name(self): return self.inner_type.name + 'OrNull' @property def enum_values(self): #", "'ByteString', 'USVString', ]) EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES = frozenset([ 'AllowShared', 'Clamp', 'EnforceRange', 'StringContext', 'TreatNullAs', ]) ################################################################################", "= state['element_type'] def resolve_typedefs(self, typedefs): self.element_type = self.element_type.resolve_typedefs(typedefs) return self @property def is_array_or_sequence_type(self):", "# We cannot use a set directly because each member is an #", "'long long', 'unsigned long long', ]) NUMERIC_TYPES = ( INTEGER_TYPES | frozenset([ #", "None def resolve_typedefs(self, typedefs): raise NotImplementedError( 'resolve_typedefs should be defined in subclasses') def", "= {} callback_interfaces = set() dictionaries = set() enums = {} # name", "self def idl_types(self): yield self for idl_type in self.inner_type.idl_types(): yield idl_type ################################################################################ #", "]) ################################################################################ # Inheritance ################################################################################ ancestors = defaultdict(list) # interface_name -> ancestors def", "return { 'member_types': self.member_types, } def __setstate__(self, state): self.member_types = state['member_types'] @property def", "callback_function = entry.get('callback_function') if not callback_function: return False return 'Custom' in callback_function.extended_attributes @property", "__getattr__(self, name): return getattr(self.inner_type, name) def __getstate__(self): return { 'inner_type': self.inner_type, 'extended_attributes': self.extended_attributes,", "typedef a typedef'ed type.\") # For the case that the resolved type contains", "return '(' + ' or 
'.join( str(member_type) for member_type in self.member_types) + ')'", "TYPE_NAMES.get(base_type, base_type) @classmethod def set_callback_functions(cls, new_callback_functions): cls.callback_functions.update(new_callback_functions) @classmethod def set_callback_interfaces(cls, new_callback_interfaces): cls.callback_interfaces.update(new_callback_interfaces) @classmethod", "an # IdlTypeBase-derived class, and comparing two objects of the # same type", "IdlType ################################################################################ class IdlType(IdlTypeBase): # FIXME: incorporate Nullable, etc. # to support types", "'has a dictionary type as its members.') self.inner_type = inner_type def __str__(self): #", "'Clamp', 'EnforceRange', 'StringContext', 'TreatNullAs', ]) ################################################################################ # Inheritance ################################################################################ ancestors = defaultdict(list) #", "is_void(self): return self.base_type == 'void' @property def is_numeric_type(self): return self.base_type in NUMERIC_TYPES @property", "nullable type must not be any.') if inner_type.name == 'Promise': raise ValueError( 'Inner", "the C++ side, # which matches the JavaScript 'null' in the enum parsing", "'StringContext is only applicable to string types.') def __str__(self): annotation = ', '.join(", "= 0 for member in self.member_types: if member.is_nullable: count += 1 member =", "def is_primitive_type(self): return self.base_type in PRIMITIVE_TYPES @property def is_interface_type(self): # Anything that is", "'unrestricted double', ])) # http://www.w3.org/TR/WebIDL/#dfn-primitive-type PRIMITIVE_TYPES = (frozenset(['boolean']) | NUMERIC_TYPES) BASIC_TYPES = (", "new_dictionaries): cls.dictionaries.update(new_dictionaries) @classmethod def set_enums(cls, new_enums): cls.enums.update(new_enums) def resolve_typedefs(self, typedefs): base_type = self.base_type", "return False @property def 
enum_values(self): return self.element_type.enum_values @property def enum_type(self): return self.element_type.enum_type def", "not another type is an interface type. # http://www.w3.org/TR/WebIDL/#idl-types # http://www.w3.org/TR/WebIDL/#idl-interface # In", "return self ################################################################################ # IdlUnionType ################################################################################ class IdlUnionType(IdlTypeBase): # http://heycam.github.io/webidl/#idl-union # IdlUnionType has", "something. class IdlArrayOrSequenceType(IdlTypeBase): \"\"\"Base class for array-like types.\"\"\" def __init__(self, element_type): super(IdlArrayOrSequenceType, self).__init__()", "return self.single_matching_member_type( lambda member_type: member_type.base_type == 'boolean') @property def sequence_member_type(self): return self.single_matching_member_type( lambda", "(annotation, str(self.inner_type)) def __getattr__(self, name): return getattr(self.inner_type, name) def __getstate__(self): return { 'inner_type':", "member_types def __str__(self): return '(' + ' or '.join( str(member_type) for member_type in", "type of nullable type must not be a promise.') if inner_type.is_nullable: raise ValueError(", "def __setstate__(self, state): self.key_type = state['key_type'] self.value_type = state['value_type'] def idl_types(self): yield self", "a None value to the list of # enum values. 
This None value", "of this source code is governed by a BSD-style license that can be", "frozenset([ # http://www.w3.org/TR/WebIDL/#dfn-numeric-type 'float', 'unrestricted float', 'double', 'unrestricted double', ])) # http://www.w3.org/TR/WebIDL/#dfn-primitive-type PRIMITIVE_TYPES", "typedefs): self.element_type = self.element_type.resolve_typedefs(typedefs) return self @property def is_array_or_sequence_type(self): return True @property def", "__str__(self): annotation = ', '.join( (key + ('' if val is None else", "self.member_types = state['member_types'] @property def flattened_member_types(self): \"\"\"Returns the set of the union's flattened", "other words: # x = IdlType('ByteString') # y = IdlType('ByteString') # x ==", "= IdlType.callback_functions.get(self.base_type) callback_function = entry.get('callback_function') if not callback_function: return False return 'Custom' in", "flattened_members[inner_member.name] = inner_member else: flattened_members[member.name] = member return set(flattened_members.values()) @property def number_of_nullable_member_types(self): \"\"\"Returns", "we can use two IDL objects of the same type interchangeably. flattened_members =", "same as comparing their names. # In other words: # x = IdlType('ByteString')", "# found in the LICENSE file. \"\"\"IDL type handling. Classes: IdlTypeBase IdlType IdlUnionType", "Nullable enums are handled by preprending a None value to the list of", "unsigned are not IDL types 'long', 'unsigned long', 'long long', 'unsigned long long',", "super(IdlType, self).__init__() if is_unrestricted: self.base_type = 'unrestricted %s' % base_type else: self.base_type =", "@property def is_primitive_type(self): return self.base_type in PRIMITIVE_TYPES @property def is_interface_type(self): # Anything that", "def name(self): annotation = ''.join( (key + ('' if val is None else", "two objects of the # same type is not the same as comparing", "C++ these are RefPtr types. 
return not (self.is_basic_type or self.is_callback_function or self.is_dictionary or", "'extended_attributes': self.extended_attributes, } def __setstate__(self, state): self.inner_type = state['inner_type'] self.extended_attributes = state['extended_attributes'] @property", "IdlType.callback_interfaces @property def is_dictionary(self): return self.base_type in IdlType.dictionaries @property def is_enum(self): # FIXME:", "] return self def idl_types(self): yield self for member_type in self.member_types: for idl_type", "type.') if inner_type.is_union_type: if inner_type.number_of_nullable_member_types > 0: raise ValueError( 'Inner type of nullable", "by preprending a None value to the list of # enum values. This", "handled by preprending a None value to the list of # enum values.", "types.\"\"\" def __init__(self, element_type): super(IdlArrayOrSequenceType, self).__init__() self.element_type = element_type def __getstate__(self): return {", "return self.name if self.is_enum else None @property def is_integer_type(self): return self.base_type in INTEGER_TYPES", "to string types.') def __str__(self): annotation = ', '.join( (key + ('' if", "= ', '.join( (key + ('' if val is None else '=' +", "# |flattened_members|'s keys are type names, the values are type # |objects|. #", "def is_callback_interface(self): return self.base_type in IdlType.callback_interfaces @property def is_dictionary(self): return self.base_type in IdlType.dictionaries", "== 'Object' or self.name == 'Promise' ) # Promise will be basic in", "typedefs): base_type = self.base_type if base_type in typedefs: resolved_type = typedefs[base_type] if resolved_type.base_type", "def __init__(self, base_type, is_unrestricted=False): super(IdlType, self).__init__() if is_unrestricted: self.base_type = 'unrestricted %s' %", "IdlTypes are picklable because we store them in interfaces_info. \"\"\" from collections import", "their names. 
# In other words: # x = IdlType('ByteString') # y =", "a union type that ' 'itself includes a nullable type.') if any(member.is_dictionary for", "return self.inner_type.name + annotation def resolve_typedefs(self, typedefs): self.inner_type = self.inner_type.resolve_typedefs(typedefs) return self def", "long', 'unsigned long long', ]) NUMERIC_TYPES = ( INTEGER_TYPES | frozenset([ # http://www.w3.org/TR/WebIDL/#dfn-numeric-type", "in callback_function.extended_attributes @property def is_callback_interface(self): return self.base_type in IdlType.callback_interfaces @property def is_dictionary(self): return", "are handled by preprending a None value to the list of # enum", "self.inner_type.idl_types(): yield idl_type ################################################################################ # IdlAnnotatedType ################################################################################ class IdlAnnotatedType(IdlTypeBase): \"\"\"IdlAnnoatedType represents an IDL", "= list(filter(predicate, self.flattened_member_types)) if len(matching_types) > 1: raise ValueError('%s is ambiguous.' % self.name)", "'member_types': self.member_types, } def __setstate__(self, state): self.member_types = state['member_types'] @property def flattened_member_types(self): \"\"\"Returns", "state['inner_type'] self.extended_attributes = state['extended_attributes'] @property def is_annotated_type(self): return True @property def has_string_context(self): return", "(frozenset(['boolean']) | NUMERIC_TYPES) BASIC_TYPES = ( PRIMITIVE_TYPES | frozenset([ # Built-in, non-composite, non-object", "in the base class, and # allows us to relay __getattr__ in IdlNullableType", "return self.base_type in IdlType.dictionaries @property def is_enum(self): # FIXME: add an IdlEnumType class", "@property def flattened_member_types(self): \"\"\"Returns the set of the union's flattened member types. 
https://heycam.github.io/webidl/#dfn-flattened-union-member-types", "% base_type else: self.base_type = base_type def __str__(self): return self.base_type def __getstate__(self): return", "__setstate__(self, state): self.inner_type = state['inner_type'] self.extended_attributes = state['extended_attributes'] @property def is_annotated_type(self): return True", "', '.join( (key + ('' if val is None else '=' + val))", "return self.single_matching_member_type( lambda member_type: member_type.is_sequence_type) @property def dictionary_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.is_dictionary)", "collections import defaultdict ################################################################################ # IDL types ################################################################################ INTEGER_TYPES = frozenset([ # http://www.w3.org/TR/WebIDL/#dfn-integer-type", "inner_type.number_of_nullable_member_types > 0: raise ValueError( 'Inner type of nullable type must not be", "are picklable because we store them in interfaces_info. \"\"\" from collections import defaultdict", "+= 1 member = member.inner_type if member.is_union_type: count += member.number_of_nullable_member_types return count @property", "the '?' in nullable types (passes nullability separately). # Update that function to", "@property def is_dictionary(self): return self.base_type in IdlType.dictionaries @property def is_enum(self): # FIXME: add", "values are type # |objects|. # We assume we can use two IDL", "def boolean_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.base_type == 'boolean') @property def sequence_member_type(self): return", "NotImplementedError('__str__() should be defined in subclasses') def __getattr__(self, name): # Default undefined attributes", "variables). # This allows us to not define default properties in the base", "IdlArrayOrSequenceType and IdlNullableType. 
\"\"\" def __str__(self): raise NotImplementedError('__str__() should be defined in subclasses')", "return 'record<%s, %s>' % (self.key_type, self.value_type) def __getstate__(self): return { 'key_type': self.key_type, 'value_type':", "in NUMERIC_TYPES @property def is_primitive_type(self): return self.base_type in PRIMITIVE_TYPES @property def is_interface_type(self): #", "self for member_type in self.member_types: for idl_type in member_type.idl_types(): yield idl_type ################################################################################ #", "'resolve_typedefs should be defined in subclasses') def idl_types(self): \"\"\"A generator which yields IdlTypes", "raise ValueError( 'Extended attributes not applicable to types: %s' % self) if ('StringContext'", "return True def single_matching_member_type(self, predicate): matching_types = list(filter(predicate, self.flattened_member_types)) if len(matching_types) > 1:", "set_callback_functions(cls, new_callback_functions): cls.callback_functions.update(new_callback_functions) @classmethod def set_callback_interfaces(cls, new_callback_interfaces): cls.callback_interfaces.update(new_callback_interfaces) @classmethod def set_dictionaries(cls, new_dictionaries): cls.dictionaries.update(new_dictionaries)", "= (frozenset(['boolean']) | NUMERIC_TYPES) BASIC_TYPES = ( PRIMITIVE_TYPES | frozenset([ # Built-in, non-composite,", "nullable) http://heycam.github.io/webidl/#dfn-type-name \"\"\" return 'Or'.join(member_type.name for member_type in self.member_types) def resolve_typedefs(self, typedefs): self.member_types", "define default properties in the base class, and # allows us to relay", "in self.member_types) def resolve_typedefs(self, typedefs): self.member_types = [ member_type.resolve_typedefs(typedefs) for member_type in self.member_types", "1 member = member.inner_type if member.is_union_type: count += member.number_of_nullable_member_types return count @property def", "def dictionary_member_type(self): return 
self.single_matching_member_type( lambda member_type: member_type.is_dictionary) @property def as_union_type(self): # Note: Use", "True @property def name(self): return self.inner_type.name + 'OrNull' @property def enum_values(self): # Nullable", "yield idl_type for idl_type in self.value_type.idl_types(): yield idl_type def resolve_typedefs(self, typedefs): self.key_type =", "val is None else '=' + val)) for key, val in self.extended_attributes.items()) return", "== rhs.name def __getstate__(self): return { 'member_types': self.member_types, } def __setstate__(self, state): self.member_types", "def __init__(self, key_type, value_type): super(IdlRecordType, self).__init__() self.key_type = key_type self.value_type = value_type def", "incorporate Nullable, etc. # to support types like short?[] vs. short[]?, instead of", "element_type): super(IdlArrayOrSequenceType, self).__init__() self.element_type = element_type def __getstate__(self): return { 'element_type': self.element_type, }", "set_ancestors(new_ancestors): ancestors.update(new_ancestors) class IdlTypeBase(object): \"\"\"Base class for IdlType, IdlUnionType, IdlArrayOrSequenceType and IdlNullableType. \"\"\"", "self.member_types: if member.is_nullable: member = member.inner_type if member.is_union_type: for inner_member in member.flattened_member_types: flattened_members[inner_member.name]", "resolve_typedefs(self, typedefs): self.inner_type = self.inner_type.resolve_typedefs(typedefs) return self def idl_types(self): yield self yield self.inner_type", "if ('StringContext' in extended_attributes and inner_type.base_type not in ['DOMString', 'USVString']): raise ValueError( 'StringContext", "properties (via flags). callback_functions = {} callback_interfaces = set() dictionaries = set() enums", "type. 
return None def resolve_typedefs(self, typedefs): raise NotImplementedError( 'resolve_typedefs should be defined in", "def is_union_type(self): return True def single_matching_member_type(self, predicate): matching_types = list(filter(predicate, self.flattened_member_types)) if len(matching_types)", "referenced from |self|, including itself.\"\"\" yield self ################################################################################ # IdlType ################################################################################ class IdlType(IdlTypeBase):", "name (or inner type name if nullable) http://heycam.github.io/webidl/#dfn-type-name \"\"\" return 'Or'.join(member_type.name for member_type", "is None else '=' + val)) for key, val in self.extended_attributes.items()) return '[%s]", "self.extended_attributes.items()) return '[%s] %s' % (annotation, str(self.inner_type)) def __getattr__(self, name): return getattr(self.inner_type, name)", "type of nullable type must not be any.') if inner_type.name == 'Promise': raise", "# Default undefined attributes to None (analogous to Jinja variables). # This allows", "self.element_type, } def __setstate__(self, state): self.element_type = state['element_type'] def resolve_typedefs(self, typedefs): self.element_type =", "of nullable types. http://heycam.github.io/webidl/#dfn-number-of-nullable-member-types \"\"\" count = 0 for member in self.member_types: if", "Copyright 2014 The Chromium Authors. All rights reserved. 
# Use of this source", "def __setstate__(self, state): self.base_type = state['base_type'] @property def is_basic_type(self): return self.base_type in BASIC_TYPES", "= self.value_type.resolve_typedefs(typedefs) return self @property def is_record_type(self): return True @property def name(self): return", "'double', 'unrestricted double', ])) # http://www.w3.org/TR/WebIDL/#dfn-primitive-type PRIMITIVE_TYPES = (frozenset(['boolean']) | NUMERIC_TYPES) BASIC_TYPES =", "of nullable type must not be a promise.') if inner_type.is_nullable: raise ValueError( 'Inner", "ancestors.update(new_ancestors) class IdlTypeBase(object): \"\"\"Base class for IdlType, IdlUnionType, IdlArrayOrSequenceType and IdlNullableType. \"\"\" def", "types. https://heycam.github.io/webidl/#idl-annotated-types \"\"\" def __init__(self, inner_type, extended_attributes): super(IdlAnnotatedType, self).__init__() self.inner_type = inner_type self.extended_attributes", "# FIXME: Dictionary::ConversionContext::setConversionType can't # handle the '?' in nullable types (passes nullability", "types 'long', 'unsigned long', 'long long', 'unsigned long long', ]) NUMERIC_TYPES = (", "@property def boolean_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.base_type == 'boolean') @property def sequence_member_type(self):", "cls.enums.update(new_enums) def resolve_typedefs(self, typedefs): base_type = self.base_type if base_type in typedefs: resolved_type =", "be any.') if inner_type.name == 'Promise': raise ValueError( 'Inner type of nullable type", "short': 'UnsignedShort', 'long': 'Long', 'unsigned long': 'UnsignedLong', 'long long': 'LongLong', 'unsigned long long':", "inner_member in member.flattened_member_types: flattened_members[inner_member.name] = inner_member else: flattened_members[member.name] = member return set(flattened_members.values()) @property", "class, and comparing two objects of the # same type is not the", "values. 
This None value is converted to nullptr on the C++ side, #", "self.inner_type.resolve_typedefs(typedefs) return self def idl_types(self): yield self for idl_type in self.inner_type.idl_types(): yield idl_type", "y.name # True # |flattened_members|'s keys are type names, the values are type", "<filename>third_party/blink/renderer/bindings/scripts/idl_types.py # Copyright 2014 The Chromium Authors. All rights reserved. # Use of", "of IdlDefinitions constructor return self.name in IdlType.enums @property def enum_values(self): return IdlType.enums.get(self.name) @property", "element_type): super(IdlFrozenArrayType, self).__init__(element_type) def __str__(self): return 'FrozenArray<%s>' % self.element_type @property def name(self): return", "on the C++ side, # which matches the JavaScript 'null' in the enum", "self.is_enum or self.name == 'Any' or self.name == 'Object' or self.name == 'Promise'", "in EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES for key in extended_attributes): raise ValueError( 'Extended attributes not applicable to", "resolved type contains other typedef'ed # type(s). return resolved_type.resolve_typedefs(typedefs) return self ################################################################################ #", "self ################################################################################ # IdlType ################################################################################ class IdlType(IdlTypeBase): # FIXME: incorporate Nullable, etc. #", "from the type name, # simplifying its signature. 
# return str(self.inner_type) + '?'", "long long', ]) NUMERIC_TYPES = ( INTEGER_TYPES | frozenset([ # http://www.w3.org/TR/WebIDL/#dfn-numeric-type 'float', 'unrestricted", "or ancestor_name in ancestors[interface_name]) def set_ancestors(new_ancestors): ancestors.update(new_ancestors) class IdlTypeBase(object): \"\"\"Base class for IdlType,", "def enum_type(self): return self.name if self.is_enum else None @property def is_integer_type(self): return self.base_type", "= self.element_type.resolve_typedefs(typedefs) return self @property def is_array_or_sequence_type(self): return True @property def is_sequence_type(self): return", "@property def is_frozen_array(self): return True ################################################################################ # IdlRecordType ################################################################################ class IdlRecordType(IdlTypeBase): def __init__(self,", "the JavaScript 'null' in the enum parsing code. inner_values = self.inner_type.enum_values if inner_values:", "member_type.idl_types(): yield idl_type ################################################################################ # IdlArrayOrSequenceType, IdlSequenceType, IdlFrozenArrayType ################################################################################ # TODO(bashi): Rename this", "self.element_type.name + 'Array' @property def is_frozen_array(self): return True ################################################################################ # IdlRecordType ################################################################################ class", "IdlNullableType ################################################################################ # https://heycam.github.io/webidl/#idl-nullable-type class IdlNullableType(IdlTypeBase): def __init__(self, inner_type): super(IdlNullableType, self).__init__() if inner_type.name", "# In other words: # x = IdlType('ByteString') # y = IdlType('ByteString') #", "store them in interfaces_info. 
\"\"\" from collections import defaultdict ################################################################################ # IDL types", "member = member.inner_type if member.is_union_type: count += member.number_of_nullable_member_types return count @property def is_union_type(self):", "'Short', 'unsigned short': 'UnsignedShort', 'long': 'Long', 'unsigned long': 'UnsignedLong', 'long long': 'LongLong', 'unsigned", "= inner_type def __str__(self): # FIXME: Dictionary::ConversionContext::setConversionType can't # handle the '?' in", "type names, the values are type # |objects|. # We assume we can", "'=' + val)) for key, val in self.extended_attributes.items()) return '[%s] %s' % (annotation,", "count @property def is_union_type(self): return True def single_matching_member_type(self, predicate): matching_types = list(filter(predicate, self.flattened_member_types))", "return self.element_type.name + 'Sequence' @property def is_sequence_type(self): return True class IdlFrozenArrayType(IdlArrayOrSequenceType): def __init__(self,", "self.inner_type.enum_values if inner_values: return [None] + inner_values return None def resolve_typedefs(self, typedefs): self.inner_type", "def __getstate__(self): return { 'base_type': self.base_type, } def __setstate__(self, state): self.base_type = state['base_type']", "to types. https://heycam.github.io/webidl/#idl-annotated-types \"\"\" def __init__(self, inner_type, extended_attributes): super(IdlAnnotatedType, self).__init__() self.inner_type = inner_type", "has __hash__() and __eq__() methods because they are stored # in sets. def", "super(IdlFrozenArrayType, self).__init__(element_type) def __str__(self): return 'FrozenArray<%s>' % self.element_type @property def name(self): return self.element_type.name", "IdlNullableType IdlAnnotatedType IdlTypes are picklable because we store them in interfaces_info. 
\"\"\" from", "@property def name(self): return self.inner_type.name + 'OrNull' @property def enum_values(self): # Nullable enums", "@property def is_callback_interface(self): return self.base_type in IdlType.callback_interfaces @property def is_dictionary(self): return self.base_type in", "are RefPtr types. return not (self.is_basic_type or self.is_callback_function or self.is_dictionary or self.is_enum or", "code is governed by a BSD-style license that can be # found in", "member_type in self.member_types) + ')' def __hash__(self): return hash(self.name) def __eq__(self, rhs): return", "state): self.element_type = state['element_type'] def resolve_typedefs(self, typedefs): self.element_type = self.element_type.resolve_typedefs(typedefs) return self @property", "the type name, # simplifying its signature. # return str(self.inner_type) + '?' return", "\"\"\"IdlAnnoatedType represents an IDL type with extended attributes. [Clamp], [EnforceRange], [StringContext], and [TreatNullAs]", "comparing two objects of the # same type is not the same as", "handling. Classes: IdlTypeBase IdlType IdlUnionType IdlArrayOrSequenceType IdlSequenceType IdlFrozenArrayType IdlNullableType IdlAnnotatedType IdlTypes are picklable", "__getstate__(self): return { 'member_types': self.member_types, } def __setstate__(self, state): self.member_types = state['member_types'] @property", "self.element_type.enum_values @property def enum_type(self): return self.element_type.enum_type def idl_types(self): yield self for idl_type in", "or member_type.is_enum) ) @property def numeric_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.is_numeric_type) @property def", "'?' 
return str(self.inner_type) def __getattr__(self, name): return getattr(self.inner_type, name) def __getstate__(self): return {", "state): self.key_type = state['key_type'] self.value_type = state['value_type'] def idl_types(self): yield self for idl_type", "is_basic_type(self): return self.base_type in BASIC_TYPES @property def is_callback_function(self): # pylint: disable=C0103 return self.base_type", "TYPE_NAMES = { # http://heycam.github.io/webidl/#dfn-type-name 'any': 'Any', 'boolean': 'Boolean', 'byte': 'Byte', 'octet': 'Octet',", "class IdlType(IdlTypeBase): # FIXME: incorporate Nullable, etc. # to support types like short?[]", "in ancestors[interface_name]) def set_ancestors(new_ancestors): ancestors.update(new_ancestors) class IdlTypeBase(object): \"\"\"Base class for IdlType, IdlUnionType, IdlArrayOrSequenceType", "# name -> values def __init__(self, base_type, is_unrestricted=False): super(IdlType, self).__init__() if is_unrestricted: self.base_type", "stored # in sets. def __init__(self, member_types): super(IdlUnionType, self).__init__() self.member_types = member_types def", "# http://heycam.github.io/webidl/#es-interface-call (step 10.11) # (Interface object [[Call]] method's string types.) 
'String', 'ByteString',", "def set_ancestors(new_ancestors): ancestors.update(new_ancestors) class IdlTypeBase(object): \"\"\"Base class for IdlType, IdlUnionType, IdlArrayOrSequenceType and IdlNullableType.", "'element_type': self.element_type, } def __setstate__(self, state): self.element_type = state['element_type'] def resolve_typedefs(self, typedefs): self.element_type", "pylint: disable=C0103 return self.base_type in IdlType.callback_functions @property def is_custom_callback_function(self): entry = IdlType.callback_functions.get(self.base_type) callback_function", "def is_interface_type(self): # Anything that is not another type is an interface type.", "IdlRecordType(IdlTypeBase): def __init__(self, key_type, value_type): super(IdlRecordType, self).__init__() self.key_type = key_type self.value_type = value_type", "the same type interchangeably. flattened_members = {} for member in self.member_types: if member.is_nullable:", "in nullable types (passes nullability separately). # Update that function to handle nullability", "if resolved_type.base_type in typedefs: raise ValueError(\"We can't typedef a typedef'ed type.\") # For", "IDL objects of the same type interchangeably. 
flattened_members = {} for member in", "is_primitive_type(self): return self.base_type in PRIMITIVE_TYPES @property def is_interface_type(self): # Anything that is not", "annotation def resolve_typedefs(self, typedefs): self.inner_type = self.inner_type.resolve_typedefs(typedefs) return self def idl_types(self): yield self", "member_types): super(IdlUnionType, self).__init__() self.member_types = member_types def __str__(self): return '(' + ' or", "INTEGER_TYPES | frozenset([ # http://www.w3.org/TR/WebIDL/#dfn-numeric-type 'float', 'unrestricted float', 'double', 'unrestricted double', ])) #", "http://heycam.github.io/webidl/#dfn-type-name 'any': 'Any', 'boolean': 'Boolean', 'byte': 'Byte', 'octet': 'Octet', 'short': 'Short', 'unsigned short':", "return (interface_name == ancestor_name or ancestor_name in ancestors[interface_name]) def set_ancestors(new_ancestors): ancestors.update(new_ancestors) class IdlTypeBase(object):", "return self.base_type def __getstate__(self): return { 'base_type': self.base_type, } def __setstate__(self, state): self.base_type", "like \"IdlArrayTypeBase\" or something. class IdlArrayOrSequenceType(IdlTypeBase): \"\"\"Base class for array-like types.\"\"\" def __init__(self,", "http://heycam.github.io/webidl/#idl-union # IdlUnionType has __hash__() and __eq__() methods because they are stored #", "ambiguous.' 
% self.name) return matching_types[0] if matching_types else None @property def string_member_type(self): return", "= member.inner_type if member.is_union_type: for inner_member in member.flattened_member_types: flattened_members[inner_member.name] = inner_member else: flattened_members[member.name]", "return IdlType.enums.get(self.name) @property def enum_type(self): return self.name if self.is_enum else None @property def", "= member return set(flattened_members.values()) @property def number_of_nullable_member_types(self): \"\"\"Returns the union's number of nullable", "return True @property def has_string_context(self): return 'StringContext' in self.extended_attributes @property def name(self): annotation", "members.') self.inner_type = inner_type def __str__(self): # FIXME: Dictionary::ConversionContext::setConversionType can't # handle the", "NotImplementedError( 'resolve_typedefs should be defined in subclasses') def idl_types(self): \"\"\"A generator which yields", "def enum_values(self): return self.element_type.enum_values @property def enum_type(self): return self.element_type.enum_type def idl_types(self): yield self", "member_type.is_sequence_type) @property def dictionary_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.is_dictionary) @property def as_union_type(self): #", "the union's number of nullable types. 
http://heycam.github.io/webidl/#dfn-number-of-nullable-member-types \"\"\" count = 0 for member", "+= member.number_of_nullable_member_types return count @property def is_union_type(self): return True def single_matching_member_type(self, predicate): matching_types", "IdlTypeBase IdlType IdlUnionType IdlArrayOrSequenceType IdlSequenceType IdlFrozenArrayType IdlNullableType IdlAnnotatedType IdlTypes are picklable because we", "# http://www.w3.org/TR/WebIDL/#dfn-numeric-type 'float', 'unrestricted float', 'double', 'unrestricted double', ])) # http://www.w3.org/TR/WebIDL/#dfn-primitive-type PRIMITIVE_TYPES =", "for idl_type in member_type.idl_types(): yield idl_type ################################################################################ # IdlArrayOrSequenceType, IdlSequenceType, IdlFrozenArrayType ################################################################################ #", "return self @property def name(self): \"\"\"Return type name (or inner type name if", "can't # handle the '?' in nullable types (passes nullability separately). # Update", "JavaScript 'null' in the enum parsing code. inner_values = self.inner_type.enum_values if inner_values: return", "self).__init__() if is_unrestricted: self.base_type = 'unrestricted %s' % base_type else: self.base_type = base_type", "'Object' or self.name == 'Promise' ) # Promise will be basic in future", "http://heycam.github.io/webidl/#dfn-type-name \"\"\" base_type = self.base_type return TYPE_NAMES.get(base_type, base_type) @classmethod def set_callback_functions(cls, new_callback_functions): cls.callback_functions.update(new_callback_functions)", "set of the union's flattened member types. 
https://heycam.github.io/webidl/#dfn-flattened-union-member-types \"\"\" # We cannot use", "'USVString']): raise ValueError( 'StringContext is only applicable to string types.') def __str__(self): annotation", "'Promise': raise ValueError( 'Inner type of nullable type must not be a promise.')", "| frozenset([ # Built-in, non-composite, non-object data types # http://heycam.github.io/webidl/#idl-types 'DOMString', 'ByteString', 'USVString',", "} def __setstate__(self, state): self.key_type = state['key_type'] self.value_type = state['value_type'] def idl_types(self): yield", "%s' % base_type else: self.base_type = base_type def __str__(self): return self.base_type def __getstate__(self):", "# IdlTypeBase-derived class, and comparing two objects of the # same type is", "name if nullable) http://heycam.github.io/webidl/#dfn-type-name \"\"\" return 'Or'.join(member_type.name for member_type in self.member_types) def resolve_typedefs(self,", "will be basic in future @property def is_string_type(self): return self.name in STRING_TYPES @property", "is_nullable(self): return True @property def name(self): return self.inner_type.name + 'OrNull' @property def enum_values(self):", "def is_callback_function(self): # pylint: disable=C0103 return self.base_type in IdlType.callback_functions @property def is_custom_callback_function(self): entry", "])) TYPE_NAMES = { # http://heycam.github.io/webidl/#dfn-type-name 'any': 'Any', 'boolean': 'Boolean', 'byte': 'Byte', 'octet':", "[StringContext], and [TreatNullAs] are applicable to types. 
https://heycam.github.io/webidl/#idl-annotated-types \"\"\" def __init__(self, inner_type, extended_attributes):", "from |self|, including itself.\"\"\" yield self ################################################################################ # IdlType ################################################################################ class IdlType(IdlTypeBase): #", "or self.is_dictionary or self.is_enum or self.name == 'Any' or self.name == 'Object' or", "in ['DOMString', 'USVString']): raise ValueError( 'StringContext is only applicable to string types.') def", "self.key_type.name + self.value_type.name + 'Record' ################################################################################ # IdlNullableType ################################################################################ # https://heycam.github.io/webidl/#idl-nullable-type class IdlNullableType(IdlTypeBase):", "callback_interfaces = set() dictionaries = set() enums = {} # name -> values", "# int and unsigned are not IDL types 'long', 'unsigned long', 'long long',", "short[]?, instead of treating these # as orthogonal properties (via flags). 
callback_functions =", "self.extended_attributes @property def name(self): annotation = ''.join( (key + ('' if val is", "@property def is_callback_function(self): # pylint: disable=C0103 return self.base_type in IdlType.callback_functions @property def is_custom_callback_function(self):", "def sequence_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.is_sequence_type) @property def dictionary_member_type(self): return self.single_matching_member_type( lambda", "in self.extended_attributes.items()) return '[%s] %s' % (annotation, str(self.inner_type)) def __getattr__(self, name): return getattr(self.inner_type,", "'[%s] %s' % (annotation, str(self.inner_type)) def __getattr__(self, name): return getattr(self.inner_type, name) def __getstate__(self):", "# Update that function to handle nullability from the type name, # simplifying", "inner type. return None def resolve_typedefs(self, typedefs): raise NotImplementedError( 'resolve_typedefs should be defined", "return True @property def name(self): return self.key_type.name + self.value_type.name + 'Record' ################################################################################ #", "applicable to string types.') def __str__(self): annotation = ', '.join( (key + (''", "return None def resolve_typedefs(self, typedefs): raise NotImplementedError( 'resolve_typedefs should be defined in subclasses')", "# x == y # False # x.name == y.name # True #", "return self.element_type.enum_values @property def enum_type(self): return self.element_type.enum_type def idl_types(self): yield self for idl_type", "extended_attributes if any(key not in EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES for key in extended_attributes): raise ValueError( 'Extended", "in self.member_types: if member.is_nullable: count += 1 member = member.inner_type if member.is_union_type: count", "union's number of nullable types. 
http://heycam.github.io/webidl/#dfn-number-of-nullable-member-types \"\"\" count = 0 for member in", "= element_type def __getstate__(self): return { 'element_type': self.element_type, } def __setstate__(self, state): self.element_type", "__init__(self, base_type, is_unrestricted=False): super(IdlType, self).__init__() if is_unrestricted: self.base_type = 'unrestricted %s' % base_type", "that can be # found in the LICENSE file. \"\"\"IDL type handling. Classes:", "value is converted to nullptr on the C++ side, # which matches the", "same type is not the same as comparing their names. # In other", "names, the values are type # |objects|. # We assume we can use", "'unsigned long long': 'UnsignedLongLong', 'float': 'Float', 'unrestricted float': 'UnrestrictedFloat', 'double': 'Double', 'unrestricted double':", "return TYPE_NAMES.get(base_type, base_type) @classmethod def set_callback_functions(cls, new_callback_functions): cls.callback_functions.update(new_callback_functions) @classmethod def set_callback_interfaces(cls, new_callback_interfaces): cls.callback_interfaces.update(new_callback_interfaces)", "@property def numeric_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.is_numeric_type) @property def boolean_member_type(self): return self.single_matching_member_type(", "+ '?' return str(self.inner_type) def __getattr__(self, name): return getattr(self.inner_type, name) def __getstate__(self): return", "@property def is_sequence_type(self): return False @property def is_frozen_array(self): return False @property def enum_values(self):", "def is_basic_type(self): return self.base_type in BASIC_TYPES @property def is_callback_function(self): # pylint: disable=C0103 return", "[[Call]] method's string types.) 
'String', 'ByteString', 'USVString', ]) EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES = frozenset([ 'AllowShared', 'Clamp',", "is_array_or_sequence_type(self): return True @property def is_sequence_type(self): return False @property def is_frozen_array(self): return False", "IdlSequenceType IdlFrozenArrayType IdlNullableType IdlAnnotatedType IdlTypes are picklable because we store them in interfaces_info.", "inner_member else: flattened_members[member.name] = member return set(flattened_members.values()) @property def number_of_nullable_member_types(self): \"\"\"Returns the union's", "'OrNull' @property def enum_values(self): # Nullable enums are handled by preprending a None", "return count @property def is_union_type(self): return True def single_matching_member_type(self, predicate): matching_types = list(filter(predicate,", "IdlType.enums.get(self.name) @property def enum_type(self): return self.name if self.is_enum else None @property def is_integer_type(self):", "values def __init__(self, base_type, is_unrestricted=False): super(IdlType, self).__init__() if is_unrestricted: self.base_type = 'unrestricted %s'", "val in self.extended_attributes.items()) return '[%s] %s' % (annotation, str(self.inner_type)) def __getattr__(self, name): return", "in PRIMITIVE_TYPES @property def is_interface_type(self): # Anything that is not another type is", "super(IdlAnnotatedType, self).__init__() self.inner_type = inner_type self.extended_attributes = extended_attributes if any(key not in EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES", "rights reserved. # Use of this source code is governed by a BSD-style", "'USVString': 'USVString', 'object': 'Object', } STRING_TYPES = frozenset([ # http://heycam.github.io/webidl/#es-interface-call (step 10.11) #", "def is_enum(self): # FIXME: add an IdlEnumType class and a resolve_enums step #", "code. 
inner_values = self.inner_type.enum_values if inner_values: return [None] + inner_values return None def", "def is_array_or_sequence_type(self): return True @property def is_sequence_type(self): return False @property def is_frozen_array(self): return", "class and a resolve_enums step # at end of IdlDefinitions constructor return self.name", "a resolve_enums step # at end of IdlDefinitions constructor return self.name in IdlType.enums", "the # same type is not the same as comparing their names. #", "subclasses') def idl_types(self): \"\"\"A generator which yields IdlTypes which are referenced from |self|,", "('' if val is None else val)) for key, val in sorted(self.extended_attributes.items())) return", "def is_integer_type(self): return self.base_type in INTEGER_TYPES @property def is_void(self): return self.base_type == 'void'", "'octet': 'Octet', 'short': 'Short', 'unsigned short': 'UnsignedShort', 'long': 'Long', 'unsigned long': 'UnsignedLong', 'long", "# return str(self.inner_type) + '?' return str(self.inner_type) def __getattr__(self, name): return getattr(self.inner_type, name)", "must not be a union type that ' 'itself includes a nullable type.')", "{ 'key_type': self.key_type, 'value_type': self.value_type, } def __setstate__(self, state): self.key_type = state['key_type'] self.value_type", "applicable to types. 
https://heycam.github.io/webidl/#idl-annotated-types \"\"\" def __init__(self, inner_type, extended_attributes): super(IdlAnnotatedType, self).__init__() self.inner_type =", "inner_type): super(IdlNullableType, self).__init__() if inner_type.name == 'Any': raise ValueError('Inner type of nullable type", "%s' % (annotation, str(self.inner_type)) def __getattr__(self, name): return getattr(self.inner_type, name) def __getstate__(self): return", "__getstate__(self): return { 'inner_type': self.inner_type, 'extended_attributes': self.extended_attributes, } def __setstate__(self, state): self.inner_type =", "################################################################################ # IdlUnionType ################################################################################ class IdlUnionType(IdlTypeBase): # http://heycam.github.io/webidl/#idl-union # IdlUnionType has __hash__() and", "in self.member_types ] return self def idl_types(self): yield self for member_type in self.member_types:", "self.element_type @property def name(self): return self.element_type.name + 'Array' @property def is_frozen_array(self): return True", "attributes to None (analogous to Jinja variables). 
# This allows us to not", "'EnforceRange', 'StringContext', 'TreatNullAs', ]) ################################################################################ # Inheritance ################################################################################ ancestors = defaultdict(list) # interface_name", "val is None else val)) for key, val in sorted(self.extended_attributes.items())) return self.inner_type.name +", "constructor return self.name in IdlType.enums @property def enum_values(self): return IdlType.enums.get(self.name) @property def enum_type(self):", "nullable type.') if inner_type.is_union_type: if inner_type.number_of_nullable_member_types > 0: raise ValueError( 'Inner type of", "of nullable type must not be a union type that ' 'has a", "a set directly because each member is an # IdlTypeBase-derived class, and comparing", "\"look through\" a possible IdlNullableType wrapper. return self @property def name(self): \"\"\"Return type", "is_union_type(self): return True def single_matching_member_type(self, predicate): matching_types = list(filter(predicate, self.flattened_member_types)) if len(matching_types) >", "IdlAnnotatedType ################################################################################ class IdlAnnotatedType(IdlTypeBase): \"\"\"IdlAnnoatedType represents an IDL type with extended attributes. [Clamp],", "'USVString', # http://heycam.github.io/webidl/#idl-types 'void', ])) TYPE_NAMES = { # http://heycam.github.io/webidl/#dfn-type-name 'any': 'Any', 'boolean':", "member = member.inner_type if member.is_union_type: for inner_member in member.flattened_member_types: flattened_members[inner_member.name] = inner_member else:", "return str(self.inner_type) + '?' 
return str(self.inner_type) def __getattr__(self, name): return getattr(self.inner_type, name) def", "not be a union type that ' 'has a dictionary type as its", "rhs): return self.name == rhs.name def __getstate__(self): return { 'member_types': self.member_types, } def", "= value_type def __str__(self): return 'record<%s, %s>' % (self.key_type, self.value_type) def __getstate__(self): return", "def idl_types(self): yield self for idl_type in self.key_type.idl_types(): yield idl_type for idl_type in", "True class IdlFrozenArrayType(IdlArrayOrSequenceType): def __init__(self, element_type): super(IdlFrozenArrayType, self).__init__(element_type) def __str__(self): return 'FrozenArray<%s>' %", "self.element_type = self.element_type.resolve_typedefs(typedefs) return self @property def is_array_or_sequence_type(self): return True @property def is_sequence_type(self):", "contains other typedef'ed # type(s). return resolved_type.resolve_typedefs(typedefs) return self ################################################################################ # IdlUnionType ################################################################################", "long': 'UnsignedLongLong', 'float': 'Float', 'unrestricted float': 'UnrestrictedFloat', 'double': 'Double', 'unrestricted double': 'UnrestrictedDouble', 'DOMString':", "def __getstate__(self): return { 'inner_type': self.inner_type, } def __setstate__(self, state): self.inner_type = state['inner_type']", "self.extended_attributes = state['extended_attributes'] @property def is_annotated_type(self): return True @property def has_string_context(self): return 'StringContext'", "in sorted(self.extended_attributes.items())) return self.inner_type.name + annotation def resolve_typedefs(self, typedefs): self.inner_type = self.inner_type.resolve_typedefs(typedefs) return", "the values are type # |objects|. # We assume we can use two", "sets. 
def __init__(self, member_types): super(IdlUnionType, self).__init__() self.member_types = member_types def __str__(self): return '('", "# as orthogonal properties (via flags). callback_functions = {} callback_interfaces = set() dictionaries", "def __str__(self): raise NotImplementedError('__str__() should be defined in subclasses') def __getattr__(self, name): #", "preprending a None value to the list of # enum values. This None", "[EnforceRange], [StringContext], and [TreatNullAs] are applicable to types. https://heycam.github.io/webidl/#idl-annotated-types \"\"\" def __init__(self, inner_type,", "def has_string_context(self): return 'StringContext' in self.extended_attributes @property def name(self): annotation = ''.join( (key", "@property def is_union_type(self): return True def single_matching_member_type(self, predicate): matching_types = list(filter(predicate, self.flattened_member_types)) if", "__getstate__(self): return { 'key_type': self.key_type, 'value_type': self.value_type, } def __setstate__(self, state): self.key_type =", "type is an interface type. 
# http://www.w3.org/TR/WebIDL/#idl-types # http://www.w3.org/TR/WebIDL/#idl-interface # In C++ these", "objects of the # same type is not the same as comparing their", "if inner_values: return [None] + inner_values return None def resolve_typedefs(self, typedefs): self.inner_type =", "def __getstate__(self): return { 'key_type': self.key_type, 'value_type': self.value_type, } def __setstate__(self, state): self.key_type", "base_type = self.base_type return TYPE_NAMES.get(base_type, base_type) @classmethod def set_callback_functions(cls, new_callback_functions): cls.callback_functions.update(new_callback_functions) @classmethod def", "resolve_enums step # at end of IdlDefinitions constructor return self.name in IdlType.enums @property", "applicable to types: %s' % self) if ('StringContext' in extended_attributes and inner_type.base_type not", "'unsigned long long', ]) NUMERIC_TYPES = ( INTEGER_TYPES | frozenset([ # http://www.w3.org/TR/WebIDL/#dfn-numeric-type 'float',", "IdlFrozenArrayType ################################################################################ # TODO(bashi): Rename this like \"IdlArrayTypeBase\" or something. class IdlArrayOrSequenceType(IdlTypeBase): \"\"\"Base", "file. \"\"\"IDL type handling. Classes: IdlTypeBase IdlType IdlUnionType IdlArrayOrSequenceType IdlSequenceType IdlFrozenArrayType IdlNullableType IdlAnnotatedType", "IDL type with extended attributes. [Clamp], [EnforceRange], [StringContext], and [TreatNullAs] are applicable to", "types like short?[] vs. short[]?, instead of treating these # as orthogonal properties", "this to \"look through\" a possible IdlNullableType wrapper. 
return self @property def name(self):", "words: # x = IdlType('ByteString') # y = IdlType('ByteString') # x == y", "type must not be a promise.') if inner_type.is_nullable: raise ValueError( 'Inner type of", "'String', 'ByteString': 'ByteString', 'USVString': 'USVString', 'object': 'Object', } STRING_TYPES = frozenset([ # http://heycam.github.io/webidl/#es-interface-call", "+ 'Array' @property def is_frozen_array(self): return True ################################################################################ # IdlRecordType ################################################################################ class IdlRecordType(IdlTypeBase):", "'long long': 'LongLong', 'unsigned long long': 'UnsignedLongLong', 'float': 'Float', 'unrestricted float': 'UnrestrictedFloat', 'double':", "idl_types(self): yield self for idl_type in self.key_type.idl_types(): yield idl_type for idl_type in self.value_type.idl_types():", "return str(self.inner_type) def __getattr__(self, name): return getattr(self.inner_type, name) def __getstate__(self): return { 'inner_type':", "self.element_type.resolve_typedefs(typedefs) return self @property def is_array_or_sequence_type(self): return True @property def is_sequence_type(self): return False", "can use two IDL objects of the same type interchangeably. flattened_members = {}", "self.element_type = state['element_type'] def resolve_typedefs(self, typedefs): self.element_type = self.element_type.resolve_typedefs(typedefs) return self @property def", "10.11) # (Interface object [[Call]] method's string types.) 'String', 'ByteString', 'USVString', ]) EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES", "# http://heycam.github.io/webidl/#idl-types 'DOMString', 'ByteString', 'USVString', # http://heycam.github.io/webidl/#idl-types 'void', ])) TYPE_NAMES = { #", "interchangeably. 
flattened_members = {} for member in self.member_types: if member.is_nullable: member = member.inner_type", "'Inner type of nullable type must not be a nullable type.') if inner_type.is_union_type:", "'void', ])) TYPE_NAMES = { # http://heycam.github.io/webidl/#dfn-type-name 'any': 'Any', 'boolean': 'Boolean', 'byte': 'Byte',", "@property def name(self): \"\"\"Return type name (or inner type name if nullable) http://heycam.github.io/webidl/#dfn-type-name", "type must not be a union type that ' 'itself includes a nullable", "to types: %s' % self) if ('StringContext' in extended_attributes and inner_type.base_type not in", "['DOMString', 'USVString']): raise ValueError( 'StringContext is only applicable to string types.') def __str__(self):", "self.base_type in PRIMITIVE_TYPES @property def is_interface_type(self): # Anything that is not another type", "'Extended attributes not applicable to types: %s' % self) if ('StringContext' in extended_attributes", "def __getstate__(self): return { 'member_types': self.member_types, } def __setstate__(self, state): self.member_types = state['member_types']", "self.value_type.name + 'Record' ################################################################################ # IdlNullableType ################################################################################ # https://heycam.github.io/webidl/#idl-nullable-type class IdlNullableType(IdlTypeBase): def __init__(self,", "self.name == rhs.name def __getstate__(self): return { 'member_types': self.member_types, } def __setstate__(self, state):", "count += member.number_of_nullable_member_types return count @property def is_union_type(self): return True def single_matching_member_type(self, predicate):", "state['key_type'] self.value_type = state['value_type'] def idl_types(self): yield self for idl_type in self.key_type.idl_types(): yield", "> 0: raise ValueError( 'Inner type of nullable type must not be a", "else '=' + val)) for key, val in self.extended_attributes.items()) 
return '[%s] %s' %", "@property def is_interface_type(self): # Anything that is not another type is an interface", "is_dictionary(self): return self.base_type in IdlType.dictionaries @property def is_enum(self): # FIXME: add an IdlEnumType", "@property def is_basic_type(self): return self.base_type in BASIC_TYPES @property def is_callback_function(self): # pylint: disable=C0103", "\"\"\"Returns the union's number of nullable types. http://heycam.github.io/webidl/#dfn-number-of-nullable-member-types \"\"\" count = 0 for", "enum values. This None value is converted to nullptr on the C++ side,", "to None (analogous to Jinja variables). # This allows us to not define", "NUMERIC_TYPES) BASIC_TYPES = ( PRIMITIVE_TYPES | frozenset([ # Built-in, non-composite, non-object data types", "IdlNullableType to the inner type. return None def resolve_typedefs(self, typedefs): raise NotImplementedError( 'resolve_typedefs", "and comparing two objects of the # same type is not the same", "[None] + inner_values return None def resolve_typedefs(self, typedefs): self.inner_type = self.inner_type.resolve_typedefs(typedefs) return self", "'Long', 'unsigned long': 'UnsignedLong', 'long long': 'LongLong', 'unsigned long long': 'UnsignedLongLong', 'float': 'Float',", "'octet', 'short', 'unsigned short', # int and unsigned are not IDL types 'long',", "def __setstate__(self, state): self.element_type = state['element_type'] def resolve_typedefs(self, typedefs): self.element_type = self.element_type.resolve_typedefs(typedefs) return", "'Or'.join(member_type.name for member_type in self.member_types) def resolve_typedefs(self, typedefs): self.member_types = [ member_type.resolve_typedefs(typedefs) for", "IDL types 'long', 'unsigned long', 'long long', 'unsigned long long', ]) NUMERIC_TYPES =", "'double': 'Double', 'unrestricted double': 'UnrestrictedDouble', 'DOMString': 'String', 'ByteString': 'ByteString', 'USVString': 'USVString', 'object': 'Object',", "be defined in subclasses') def 
idl_types(self): \"\"\"A generator which yields IdlTypes which are", "# https://heycam.github.io/webidl/#idl-nullable-type class IdlNullableType(IdlTypeBase): def __init__(self, inner_type): super(IdlNullableType, self).__init__() if inner_type.name == 'Any':", "type of nullable type must not be a union type that ' 'itself", "@classmethod def set_callback_functions(cls, new_callback_functions): cls.callback_functions.update(new_callback_functions) @classmethod def set_callback_interfaces(cls, new_callback_interfaces): cls.callback_interfaces.update(new_callback_interfaces) @classmethod def set_dictionaries(cls,", "dictionary_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.is_dictionary) @property def as_union_type(self): # Note: Use this", "of nullable type must not be a union type that ' 'itself includes", "to not define default properties in the base class, and # allows us", "self.extended_attributes, } def __setstate__(self, state): self.inner_type = state['inner_type'] self.extended_attributes = state['extended_attributes'] @property def", "the base class, and # allows us to relay __getattr__ in IdlNullableType to", "self).__init__() self.element_type = element_type def __getstate__(self): return { 'element_type': self.element_type, } def __setstate__(self,", "double': 'UnrestrictedDouble', 'DOMString': 'String', 'ByteString': 'ByteString', 'USVString': 'USVString', 'object': 'Object', } STRING_TYPES =", "NUMERIC_TYPES = ( INTEGER_TYPES | frozenset([ # http://www.w3.org/TR/WebIDL/#dfn-numeric-type 'float', 'unrestricted float', 'double', 'unrestricted", "self.name if self.is_enum else None @property def is_integer_type(self): return self.base_type in INTEGER_TYPES @property", "that ' 'has a dictionary type as its members.') self.inner_type = inner_type def", "def is_record_type(self): return True @property def name(self): return self.key_type.name + self.value_type.name + 'Record'", "raise ValueError( 'Inner type of 
nullable type must not be a promise.') if", "# y = IdlType('ByteString') # x == y # False # x.name ==", "has_string_context(self): return 'StringContext' in self.extended_attributes @property def name(self): annotation = ''.join( (key +", "== 'Any' or self.name == 'Object' or self.name == 'Promise' ) # Promise", "an IDL type with extended attributes. [Clamp], [EnforceRange], [StringContext], and [TreatNullAs] are applicable", "self.value_type.resolve_typedefs(typedefs) return self @property def is_record_type(self): return True @property def name(self): return self.key_type.name", "__str__(self): return '(' + ' or '.join( str(member_type) for member_type in self.member_types) +", "True @property def is_sequence_type(self): return False @property def is_frozen_array(self): return False @property def", "== ancestor_name or ancestor_name in ancestors[interface_name]) def set_ancestors(new_ancestors): ancestors.update(new_ancestors) class IdlTypeBase(object): \"\"\"Base class", "%s>' % (self.key_type, self.value_type) def __getstate__(self): return { 'key_type': self.key_type, 'value_type': self.value_type, }", "idl_types(self): yield self for idl_type in self.inner_type.idl_types(): yield idl_type ################################################################################ # IdlAnnotatedType ################################################################################", "__init__(self, inner_type, extended_attributes): super(IdlAnnotatedType, self).__init__() self.inner_type = inner_type self.extended_attributes = extended_attributes if any(key", "else None @property def is_integer_type(self): return self.base_type in INTEGER_TYPES @property def is_void(self): return", "for idl_type in self.value_type.idl_types(): yield idl_type def resolve_typedefs(self, typedefs): self.key_type = self.key_type.resolve_typedefs(typedefs) self.value_type", "+ ' or '.join( str(member_type) for member_type in self.member_types) + ')' def __hash__(self):", "FIXME: add an 
IdlEnumType class and a resolve_enums step # at end of", "return not (self.is_basic_type or self.is_callback_function or self.is_dictionary or self.is_enum or self.name == 'Any'", "NUMERIC_TYPES @property def is_primitive_type(self): return self.base_type in PRIMITIVE_TYPES @property def is_interface_type(self): # Anything", "# at end of IdlDefinitions constructor return self.name in IdlType.enums @property def enum_values(self):", "= member.inner_type if member.is_union_type: count += member.number_of_nullable_member_types return count @property def is_union_type(self): return", "or something. class IdlArrayOrSequenceType(IdlTypeBase): \"\"\"Base class for array-like types.\"\"\" def __init__(self, element_type): super(IdlArrayOrSequenceType,", "to relay __getattr__ in IdlNullableType to the inner type. return None def resolve_typedefs(self,", "def name(self): return self.inner_type.name + 'OrNull' @property def enum_values(self): # Nullable enums are", "in member_type.idl_types(): yield idl_type ################################################################################ # IdlArrayOrSequenceType, IdlSequenceType, IdlFrozenArrayType ################################################################################ # TODO(bashi): Rename", "http://heycam.github.io/webidl/#es-interface-call (step 10.11) # (Interface object [[Call]] method's string types.) 
'String', 'ByteString', 'USVString',", "or self.name == 'Promise' ) # Promise will be basic in future @property", "be basic in future @property def is_string_type(self): return self.name in STRING_TYPES @property def", "a typedef'ed type.\") # For the case that the resolved type contains other", "member_type: member_type.is_numeric_type) @property def boolean_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.base_type == 'boolean') @property", "# IdlNullableType ################################################################################ # https://heycam.github.io/webidl/#idl-nullable-type class IdlNullableType(IdlTypeBase): def __init__(self, inner_type): super(IdlNullableType, self).__init__() if", "= self.base_type if base_type in typedefs: resolved_type = typedefs[base_type] if resolved_type.base_type in typedefs:", "short?[] vs. short[]?, instead of treating these # as orthogonal properties (via flags).", "self.member_types) + ')' def __hash__(self): return hash(self.name) def __eq__(self, rhs): return self.name ==", "resolved_type = typedefs[base_type] if resolved_type.base_type in typedefs: raise ValueError(\"We can't typedef a typedef'ed", "any(member.is_dictionary for member in inner_type.flattened_member_types): raise ValueError( 'Inner type of nullable type must", "# Built-in, non-composite, non-object data types # http://heycam.github.io/webidl/#idl-types 'DOMString', 'ByteString', 'USVString', # http://heycam.github.io/webidl/#idl-types", "return self.base_type in INTEGER_TYPES @property def is_void(self): return self.base_type == 'void' @property def", "are type names, the values are type # |objects|. # We assume we", "__eq__() methods because they are stored # in sets. def __init__(self, member_types): super(IdlUnionType,", "super(IdlSequenceType, self).__init__(element_type) def __str__(self): return 'sequence<%s>' % self.element_type @property def name(self): return self.element_type.name", "types. 
http://heycam.github.io/webidl/#dfn-number-of-nullable-member-types \"\"\" count = 0 for member in self.member_types: if member.is_nullable: count", "Use this to \"look through\" a possible IdlNullableType wrapper. return self @property def", "# Inheritance ################################################################################ ancestors = defaultdict(list) # interface_name -> ancestors def inherits_interface(interface_name, ancestor_name):", "return getattr(self.inner_type, name) def __getstate__(self): return { 'inner_type': self.inner_type, 'extended_attributes': self.extended_attributes, } def", "in the enum parsing code. inner_values = self.inner_type.enum_values if inner_values: return [None] +", "typedef'ed # type(s). return resolved_type.resolve_typedefs(typedefs) return self ################################################################################ # IdlUnionType ################################################################################ class IdlUnionType(IdlTypeBase):", "future @property def is_string_type(self): return self.name in STRING_TYPES @property def name(self): \"\"\"Return type", "resolve_typedefs(self, typedefs): self.element_type = self.element_type.resolve_typedefs(typedefs) return self @property def is_array_or_sequence_type(self): return True @property", "in sets. 
def __init__(self, member_types): super(IdlUnionType, self).__init__() self.member_types = member_types def __str__(self): return", "class IdlNullableType(IdlTypeBase): def __init__(self, inner_type): super(IdlNullableType, self).__init__() if inner_type.name == 'Any': raise ValueError('Inner", "if not callback_function: return False return 'Custom' in callback_function.extended_attributes @property def is_callback_interface(self): return", "union type that ' 'has a dictionary type as its members.') self.inner_type =", "val)) for key, val in sorted(self.extended_attributes.items())) return self.inner_type.name + annotation def resolve_typedefs(self, typedefs):", "or '.join( str(member_type) for member_type in self.member_types) + ')' def __hash__(self): return hash(self.name)", "'ByteString', 'USVString': 'USVString', 'object': 'Object', } STRING_TYPES = frozenset([ # http://heycam.github.io/webidl/#es-interface-call (step 10.11)", "None @property def string_member_type(self): return self.single_matching_member_type( lambda member_type: (member_type.is_string_type or member_type.is_enum) ) @property", "state['element_type'] def resolve_typedefs(self, typedefs): self.element_type = self.element_type.resolve_typedefs(typedefs) return self @property def is_array_or_sequence_type(self): return", "type interchangeably. 
flattened_members = {} for member in self.member_types: if member.is_nullable: member =", "IdlType('ByteString') # x == y # False # x.name == y.name # True", "return 'Or'.join(member_type.name for member_type in self.member_types) def resolve_typedefs(self, typedefs): self.member_types = [ member_type.resolve_typedefs(typedefs)", "return { 'element_type': self.element_type, } def __setstate__(self, state): self.element_type = state['element_type'] def resolve_typedefs(self,", "allows us to not define default properties in the base class, and #", "@property def is_void(self): return self.base_type == 'void' @property def is_numeric_type(self): return self.base_type in", "def set_dictionaries(cls, new_dictionaries): cls.dictionaries.update(new_dictionaries) @classmethod def set_enums(cls, new_enums): cls.enums.update(new_enums) def resolve_typedefs(self, typedefs): base_type", "return 'Custom' in callback_function.extended_attributes @property def is_callback_interface(self): return self.base_type in IdlType.callback_interfaces @property def", "return False return 'Custom' in callback_function.extended_attributes @property def is_callback_interface(self): return self.base_type in IdlType.callback_interfaces", "itself.\"\"\" yield self ################################################################################ # IdlType ################################################################################ class IdlType(IdlTypeBase): # FIXME: incorporate Nullable,", "types: %s' % self) if ('StringContext' in extended_attributes and inner_type.base_type not in ['DOMString',", "string types.') def __str__(self): annotation = ', '.join( (key + ('' if val", "these are RefPtr types. 
return not (self.is_basic_type or self.is_callback_function or self.is_dictionary or self.is_enum", "typedefs): self.member_types = [ member_type.resolve_typedefs(typedefs) for member_type in self.member_types ] return self def", "@property def is_frozen_array(self): return False @property def enum_values(self): return self.element_type.enum_values @property def enum_type(self):", "def is_nullable(self): return True @property def name(self): return self.inner_type.name + 'OrNull' @property def", "be a promise.') if inner_type.is_nullable: raise ValueError( 'Inner type of nullable type must", "name(self): annotation = ''.join( (key + ('' if val is None else val))", "__getstate__(self): return { 'base_type': self.base_type, } def __setstate__(self, state): self.base_type = state['base_type'] @property", "flags). callback_functions = {} callback_interfaces = set() dictionaries = set() enums = {}", "== y # False # x.name == y.name # True # |flattened_members|'s keys", "def __init__(self, inner_type, extended_attributes): super(IdlAnnotatedType, self).__init__() self.inner_type = inner_type self.extended_attributes = extended_attributes if", "self.base_type, } def __setstate__(self, state): self.base_type = state['base_type'] @property def is_basic_type(self): return self.base_type", "inner_values = self.inner_type.enum_values if inner_values: return [None] + inner_values return None def resolve_typedefs(self,", "+ 'Sequence' @property def is_sequence_type(self): return True class IdlFrozenArrayType(IdlArrayOrSequenceType): def __init__(self, element_type): super(IdlFrozenArrayType,", "EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES for key in extended_attributes): raise ValueError( 'Extended attributes not applicable to types:", "callback_function: return False return 'Custom' in callback_function.extended_attributes @property def is_callback_interface(self): return self.base_type in", "# False # x.name == y.name # True # |flattened_members|'s keys are type", "idl_types(self): 
\"\"\"A generator which yields IdlTypes which are referenced from |self|, including itself.\"\"\"", "including itself.\"\"\" yield self ################################################################################ # IdlType ################################################################################ class IdlType(IdlTypeBase): # FIXME: incorporate", "as_union_type(self): # Note: Use this to \"look through\" a possible IdlNullableType wrapper. return", "'DOMString': 'String', 'ByteString': 'ByteString', 'USVString': 'USVString', 'object': 'Object', } STRING_TYPES = frozenset([ #", "\"\"\"Base class for IdlType, IdlUnionType, IdlArrayOrSequenceType and IdlNullableType. \"\"\" def __str__(self): raise NotImplementedError('__str__()", "super(IdlArrayOrSequenceType, self).__init__() self.element_type = element_type def __getstate__(self): return { 'element_type': self.element_type, } def", "member in self.member_types: if member.is_nullable: count += 1 member = member.inner_type if member.is_union_type:", "class IdlTypeBase(object): \"\"\"Base class for IdlType, IdlUnionType, IdlArrayOrSequenceType and IdlNullableType. 
\"\"\" def __str__(self):", "# pylint: disable=C0103 return self.base_type in IdlType.callback_functions @property def is_custom_callback_function(self): entry = IdlType.callback_functions.get(self.base_type)", "__getstate__(self): return { 'inner_type': self.inner_type, } def __setstate__(self, state): self.inner_type = state['inner_type'] @property", "} def __setstate__(self, state): self.base_type = state['base_type'] @property def is_basic_type(self): return self.base_type in", "set() dictionaries = set() enums = {} # name -> values def __init__(self,", "type name http://heycam.github.io/webidl/#dfn-type-name \"\"\" base_type = self.base_type return TYPE_NAMES.get(base_type, base_type) @classmethod def set_callback_functions(cls,", "and unsigned are not IDL types 'long', 'unsigned long', 'long long', 'unsigned long", "object [[Call]] method's string types.) 'String', 'ByteString', 'USVString', ]) EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES = frozenset([ 'AllowShared',", "__init__(self, inner_type): super(IdlNullableType, self).__init__() if inner_type.name == 'Any': raise ValueError('Inner type of nullable", "not define default properties in the base class, and # allows us to", "https://heycam.github.io/webidl/#idl-annotated-types \"\"\" def __init__(self, inner_type, extended_attributes): super(IdlAnnotatedType, self).__init__() self.inner_type = inner_type self.extended_attributes =", "names. # In other words: # x = IdlType('ByteString') # y = IdlType('ByteString')", "__init__(self, key_type, value_type): super(IdlRecordType, self).__init__() self.key_type = key_type self.value_type = value_type def __str__(self):", "} STRING_TYPES = frozenset([ # http://heycam.github.io/webidl/#es-interface-call (step 10.11) # (Interface object [[Call]] method's", "IdlFrozenArrayType IdlNullableType IdlAnnotatedType IdlTypes are picklable because we store them in interfaces_info. 
\"\"\"", "key_type self.value_type = value_type def __str__(self): return 'record<%s, %s>' % (self.key_type, self.value_type) def", "disable=C0103 return self.base_type in IdlType.callback_functions @property def is_custom_callback_function(self): entry = IdlType.callback_functions.get(self.base_type) callback_function =", "Promise will be basic in future @property def is_string_type(self): return self.name in STRING_TYPES", "frozenset([ # http://heycam.github.io/webidl/#es-interface-call (step 10.11) # (Interface object [[Call]] method's string types.) 'String',", "name, # simplifying its signature. # return str(self.inner_type) + '?' return str(self.inner_type) def", "promise.') if inner_type.is_nullable: raise ValueError( 'Inner type of nullable type must not be", "== 'Promise': raise ValueError( 'Inner type of nullable type must not be a", "http://heycam.github.io/webidl/#dfn-type-name \"\"\" return 'Or'.join(member_type.name for member_type in self.member_types) def resolve_typedefs(self, typedefs): self.member_types =", "C++ side, # which matches the JavaScript 'null' in the enum parsing code.", "to the list of # enum values. This None value is converted to", "two IDL objects of the same type interchangeably. flattened_members = {} for member", "as orthogonal properties (via flags). callback_functions = {} callback_interfaces = set() dictionaries =", "the case that the resolved type contains other typedef'ed # type(s). return resolved_type.resolve_typedefs(typedefs)", "represents an IDL type with extended attributes. [Clamp], [EnforceRange], [StringContext], and [TreatNullAs] are", "@property def name(self): \"\"\"Return type name http://heycam.github.io/webidl/#dfn-type-name \"\"\" base_type = self.base_type return TYPE_NAMES.get(base_type,", "number_of_nullable_member_types(self): \"\"\"Returns the union's number of nullable types. 
http://heycam.github.io/webidl/#dfn-number-of-nullable-member-types \"\"\" count = 0", "BASIC_TYPES = ( PRIMITIVE_TYPES | frozenset([ # Built-in, non-composite, non-object data types #", "with extended attributes. [Clamp], [EnforceRange], [StringContext], and [TreatNullAs] are applicable to types. https://heycam.github.io/webidl/#idl-annotated-types", "@property def is_annotated_type(self): return True @property def has_string_context(self): return 'StringContext' in self.extended_attributes @property", "None (analogous to Jinja variables). # This allows us to not define default", "non-object data types # http://heycam.github.io/webidl/#idl-types 'DOMString', 'ByteString', 'USVString', # http://heycam.github.io/webidl/#idl-types 'void', ])) TYPE_NAMES", "super(IdlRecordType, self).__init__() self.key_type = key_type self.value_type = value_type def __str__(self): return 'record<%s, %s>'", "' 'itself includes a nullable type.') if any(member.is_dictionary for member in inner_type.flattened_member_types): raise", "# We assume we can use two IDL objects of the same type", "nullable types (passes nullability separately). 
# Update that function to handle nullability from", "class IdlUnionType(IdlTypeBase): # http://heycam.github.io/webidl/#idl-union # IdlUnionType has __hash__() and __eq__() methods because they", "lambda member_type: member_type.base_type == 'boolean') @property def sequence_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.is_sequence_type)", "else val)) for key, val in sorted(self.extended_attributes.items())) return self.inner_type.name + annotation def resolve_typedefs(self,", "= member_types def __str__(self): return '(' + ' or '.join( str(member_type) for member_type", "in future @property def is_string_type(self): return self.name in STRING_TYPES @property def name(self): \"\"\"Return", "self).__init__() self.key_type = key_type self.value_type = value_type def __str__(self): return 'record<%s, %s>' %", "yields IdlTypes which are referenced from |self|, including itself.\"\"\" yield self ################################################################################ #", "= state['key_type'] self.value_type = state['value_type'] def idl_types(self): yield self for idl_type in self.key_type.idl_types():", "subclasses') def __getattr__(self, name): # Default undefined attributes to None (analogous to Jinja", "not be a nullable type.') if inner_type.is_union_type: if inner_type.number_of_nullable_member_types > 0: raise ValueError(", "'USVString', ]) EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES = frozenset([ 'AllowShared', 'Clamp', 'EnforceRange', 'StringContext', 'TreatNullAs', ]) ################################################################################ #", "use two IDL objects of the same type interchangeably. 
flattened_members = {} for", "hash(self.name) def __eq__(self, rhs): return self.name == rhs.name def __getstate__(self): return { 'member_types':", "IdlRecordType ################################################################################ class IdlRecordType(IdlTypeBase): def __init__(self, key_type, value_type): super(IdlRecordType, self).__init__() self.key_type = key_type", "else None @property def string_member_type(self): return self.single_matching_member_type( lambda member_type: (member_type.is_string_type or member_type.is_enum) )", "long long': 'UnsignedLongLong', 'float': 'Float', 'unrestricted float': 'UnrestrictedFloat', 'double': 'Double', 'unrestricted double': 'UnrestrictedDouble',", "possible IdlNullableType wrapper. return self @property def name(self): \"\"\"Return type name (or inner", "'null' in the enum parsing code. inner_values = self.inner_type.enum_values if inner_values: return [None]", "etc. # to support types like short?[] vs. short[]?, instead of treating these", "in inner_type.flattened_member_types): raise ValueError( 'Inner type of nullable type must not be a", "= IdlType('ByteString') # x == y # False # x.name == y.name #", "set_enums(cls, new_enums): cls.enums.update(new_enums) def resolve_typedefs(self, typedefs): base_type = self.base_type if base_type in typedefs:", "return resolved_type.resolve_typedefs(typedefs) return self ################################################################################ # IdlUnionType ################################################################################ class IdlUnionType(IdlTypeBase): # http://heycam.github.io/webidl/#idl-union #", "set(flattened_members.values()) @property def number_of_nullable_member_types(self): \"\"\"Returns the union's number of nullable types. 
http://heycam.github.io/webidl/#dfn-number-of-nullable-member-types \"\"\"", "@property def is_custom_callback_function(self): entry = IdlType.callback_functions.get(self.base_type) callback_function = entry.get('callback_function') if not callback_function: return", "% (self.key_type, self.value_type) def __getstate__(self): return { 'key_type': self.key_type, 'value_type': self.value_type, } def", "# IDL types ################################################################################ INTEGER_TYPES = frozenset([ # http://www.w3.org/TR/WebIDL/#dfn-integer-type 'byte', 'octet', 'short', 'unsigned", "== 'Promise' ) # Promise will be basic in future @property def is_string_type(self):", "inner_type.name == 'Any': raise ValueError('Inner type of nullable type must not be any.')", "__hash__(self): return hash(self.name) def __eq__(self, rhs): return self.name == rhs.name def __getstate__(self): return", "{ 'element_type': self.element_type, } def __setstate__(self, state): self.element_type = state['element_type'] def resolve_typedefs(self, typedefs):", "idl_type def resolve_typedefs(self, typedefs): self.key_type = self.key_type.resolve_typedefs(typedefs) self.value_type = self.value_type.resolve_typedefs(typedefs) return self @property", "new_callback_functions): cls.callback_functions.update(new_callback_functions) @classmethod def set_callback_interfaces(cls, new_callback_interfaces): cls.callback_interfaces.update(new_callback_interfaces) @classmethod def set_dictionaries(cls, new_dictionaries): cls.dictionaries.update(new_dictionaries) @classmethod", "(passes nullability separately). 
# Update that function to handle nullability from the type", "self.key_type = self.key_type.resolve_typedefs(typedefs) self.value_type = self.value_type.resolve_typedefs(typedefs) return self @property def is_record_type(self): return True", "'Any', 'boolean': 'Boolean', 'byte': 'Byte', 'octet': 'Octet', 'short': 'Short', 'unsigned short': 'UnsignedShort', 'long':", "member_type in self.member_types) def resolve_typedefs(self, typedefs): self.member_types = [ member_type.resolve_typedefs(typedefs) for member_type in", "'StringContext', 'TreatNullAs', ]) ################################################################################ # Inheritance ################################################################################ ancestors = defaultdict(list) # interface_name ->", "in IdlType.callback_functions @property def is_custom_callback_function(self): entry = IdlType.callback_functions.get(self.base_type) callback_function = entry.get('callback_function') if not", "them in interfaces_info. \"\"\" from collections import defaultdict ################################################################################ # IDL types ################################################################################", "should be defined in subclasses') def __getattr__(self, name): # Default undefined attributes to", "not (self.is_basic_type or self.is_callback_function or self.is_dictionary or self.is_enum or self.name == 'Any' or", "# which matches the JavaScript 'null' in the enum parsing code. 
inner_values =", "base_type in typedefs: resolved_type = typedefs[base_type] if resolved_type.base_type in typedefs: raise ValueError(\"We can't", "return self.base_type in PRIMITIVE_TYPES @property def is_interface_type(self): # Anything that is not another", "Built-in, non-composite, non-object data types # http://heycam.github.io/webidl/#idl-types 'DOMString', 'ByteString', 'USVString', # http://heycam.github.io/webidl/#idl-types 'void',", "self.is_callback_function or self.is_dictionary or self.is_enum or self.name == 'Any' or self.name == 'Object'", "inherits_interface(interface_name, ancestor_name): return (interface_name == ancestor_name or ancestor_name in ancestors[interface_name]) def set_ancestors(new_ancestors): ancestors.update(new_ancestors)", "__getattr__ in IdlNullableType to the inner type. return None def resolve_typedefs(self, typedefs): raise", "in IdlType.enums @property def enum_values(self): return IdlType.enums.get(self.name) @property def enum_type(self): return self.name if", "return self.single_matching_member_type( lambda member_type: (member_type.is_string_type or member_type.is_enum) ) @property def numeric_member_type(self): return self.single_matching_member_type(", "= frozenset([ # http://www.w3.org/TR/WebIDL/#dfn-integer-type 'byte', 'octet', 'short', 'unsigned short', # int and unsigned", "resolve_typedefs(self, typedefs): self.member_types = [ member_type.resolve_typedefs(typedefs) for member_type in self.member_types ] return self", "'Byte', 'octet': 'Octet', 'short': 'Short', 'unsigned short': 'UnsignedShort', 'long': 'Long', 'unsigned long': 'UnsignedLong',", "support types like short?[] vs. 
short[]?, instead of treating these # as orthogonal", "__init__(self, member_types): super(IdlUnionType, self).__init__() self.member_types = member_types def __str__(self): return '(' + '", "idl_types(self): yield self for member_type in self.member_types: for idl_type in member_type.idl_types(): yield idl_type", "are referenced from |self|, including itself.\"\"\" yield self ################################################################################ # IdlType ################################################################################ class", "name -> values def __init__(self, base_type, is_unrestricted=False): super(IdlType, self).__init__() if is_unrestricted: self.base_type =", "are type # |objects|. # We assume we can use two IDL objects", "} def __setstate__(self, state): self.member_types = state['member_types'] @property def flattened_member_types(self): \"\"\"Returns the set", "if base_type in typedefs: resolved_type = typedefs[base_type] if resolved_type.base_type in typedefs: raise ValueError(\"We", "to nullptr on the C++ side, # which matches the JavaScript 'null' in", "'short': 'Short', 'unsigned short': 'UnsignedShort', 'long': 'Long', 'unsigned long': 'UnsignedLong', 'long long': 'LongLong',", "+ self.value_type.name + 'Record' ################################################################################ # IdlNullableType ################################################################################ # https://heycam.github.io/webidl/#idl-nullable-type class IdlNullableType(IdlTypeBase): def", "type that ' 'itself includes a nullable type.') if any(member.is_dictionary for member in", "member.is_nullable: count += 1 member = member.inner_type if member.is_union_type: count += member.number_of_nullable_member_types return", "frozenset([ # Built-in, non-composite, non-object data types # http://heycam.github.io/webidl/#idl-types 'DOMString', 'ByteString', 'USVString', #", "# True # |flattened_members|'s keys are type names, the values are 
type #", "'float': 'Float', 'unrestricted float': 'UnrestrictedFloat', 'double': 'Double', 'unrestricted double': 'UnrestrictedDouble', 'DOMString': 'String', 'ByteString':", "ValueError( 'Inner type of nullable type must not be a promise.') if inner_type.is_nullable:", "keys are type names, the values are type # |objects|. # We assume", "self.inner_type = inner_type self.extended_attributes = extended_attributes if any(key not in EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES for key", "self @property def is_array_or_sequence_type(self): return True @property def is_sequence_type(self): return False @property def", "idl_type in self.key_type.idl_types(): yield idl_type for idl_type in self.value_type.idl_types(): yield idl_type def resolve_typedefs(self,", "any(key not in EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES for key in extended_attributes): raise ValueError( 'Extended attributes not", "resolve_typedefs(self, typedefs): raise NotImplementedError( 'resolve_typedefs should be defined in subclasses') def idl_types(self): \"\"\"A", "self.base_type = 'unrestricted %s' % base_type else: self.base_type = base_type def __str__(self): return", "== 'Any': raise ValueError('Inner type of nullable type must not be any.') if", "raise ValueError(\"We can't typedef a typedef'ed type.\") # For the case that the", "state): self.inner_type = state['inner_type'] @property def is_nullable(self): return True @property def name(self): return", "is_string_type(self): return self.name in STRING_TYPES @property def name(self): \"\"\"Return type name http://heycam.github.io/webidl/#dfn-type-name \"\"\"", "self.member_types ] return self def idl_types(self): yield self for member_type in self.member_types: for", "member.flattened_member_types: flattened_members[inner_member.name] = inner_member else: flattened_members[member.name] = member return set(flattened_members.values()) @property def number_of_nullable_member_types(self):", "type.') if any(member.is_dictionary for member in 
inner_type.flattened_member_types): raise ValueError( 'Inner type of nullable", "self).__init__(element_type) def __str__(self): return 'sequence<%s>' % self.element_type @property def name(self): return self.element_type.name +", "return 'StringContext' in self.extended_attributes @property def name(self): annotation = ''.join( (key + (''", "member_type: member_type.is_sequence_type) @property def dictionary_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.is_dictionary) @property def as_union_type(self):", "(interface_name == ancestor_name or ancestor_name in ancestors[interface_name]) def set_ancestors(new_ancestors): ancestors.update(new_ancestors) class IdlTypeBase(object): \"\"\"Base", "'unsigned short': 'UnsignedShort', 'long': 'Long', 'unsigned long': 'UnsignedLong', 'long long': 'LongLong', 'unsigned long", "inner_values: return [None] + inner_values return None def resolve_typedefs(self, typedefs): self.inner_type = self.inner_type.resolve_typedefs(typedefs)", "is_custom_callback_function(self): entry = IdlType.callback_functions.get(self.base_type) callback_function = entry.get('callback_function') if not callback_function: return False return", "self.name == 'Promise' ) # Promise will be basic in future @property def", "a nullable type.') if any(member.is_dictionary for member in inner_type.flattened_member_types): raise ValueError( 'Inner type", "nullable type must not be a union type that ' 'has a dictionary", "return True ################################################################################ # IdlRecordType ################################################################################ class IdlRecordType(IdlTypeBase): def __init__(self, key_type, value_type): super(IdlRecordType,", "or self.is_callback_function or self.is_dictionary or self.is_enum or self.name == 'Any' or self.name ==", "type.\") # For the case that the resolved type contains other typedef'ed #", "% self.element_type @property 
def name(self): return self.element_type.name + 'Array' @property def is_frozen_array(self): return", "value_type): super(IdlRecordType, self).__init__() self.key_type = key_type self.value_type = value_type def __str__(self): return 'record<%s,", "class IdlArrayOrSequenceType(IdlTypeBase): \"\"\"Base class for array-like types.\"\"\" def __init__(self, element_type): super(IdlArrayOrSequenceType, self).__init__() self.element_type", "single_matching_member_type(self, predicate): matching_types = list(filter(predicate, self.flattened_member_types)) if len(matching_types) > 1: raise ValueError('%s is", "__str__(self): # FIXME: Dictionary::ConversionContext::setConversionType can't # handle the '?' in nullable types (passes", "is_interface_type(self): # Anything that is not another type is an interface type. #", "new_enums): cls.enums.update(new_enums) def resolve_typedefs(self, typedefs): base_type = self.base_type if base_type in typedefs: resolved_type", "self.element_type.enum_type def idl_types(self): yield self for idl_type in self.element_type.idl_types(): yield idl_type class IdlSequenceType(IdlArrayOrSequenceType):", "def __init__(self, element_type): super(IdlArrayOrSequenceType, self).__init__() self.element_type = element_type def __getstate__(self): return { 'element_type':", "should be defined in subclasses') def idl_types(self): \"\"\"A generator which yields IdlTypes which", "undefined attributes to None (analogous to Jinja variables). 
# This allows us to", "@property def enum_values(self): return self.element_type.enum_values @property def enum_type(self): return self.element_type.enum_type def idl_types(self): yield", "'.join( (key + ('' if val is None else '=' + val)) for", "types.') def __str__(self): annotation = ', '.join( (key + ('' if val is", "IdlUnionType IdlArrayOrSequenceType IdlSequenceType IdlFrozenArrayType IdlNullableType IdlAnnotatedType IdlTypes are picklable because we store them", "be a union type that ' 'has a dictionary type as its members.')", "self.base_type def __getstate__(self): return { 'base_type': self.base_type, } def __setstate__(self, state): self.base_type =", "a promise.') if inner_type.is_nullable: raise ValueError( 'Inner type of nullable type must not", "y # False # x.name == y.name # True # |flattened_members|'s keys are", "the resolved type contains other typedef'ed # type(s). return resolved_type.resolve_typedefs(typedefs) return self ################################################################################", "')' def __hash__(self): return hash(self.name) def __eq__(self, rhs): return self.name == rhs.name def", "Authors. All rights reserved. 
# Use of this source code is governed by", "nullptr on the C++ side, # which matches the JavaScript 'null' in the", "nullable type.') if any(member.is_dictionary for member in inner_type.flattened_member_types): raise ValueError( 'Inner type of", "0 for member in self.member_types: if member.is_nullable: count += 1 member = member.inner_type", "self.value_type = state['value_type'] def idl_types(self): yield self for idl_type in self.key_type.idl_types(): yield idl_type", "in subclasses') def idl_types(self): \"\"\"A generator which yields IdlTypes which are referenced from", "Use of this source code is governed by a BSD-style license that can", "self).__init__() self.member_types = member_types def __str__(self): return '(' + ' or '.join( str(member_type)", "self.inner_type = inner_type def __str__(self): # FIXME: Dictionary::ConversionContext::setConversionType can't # handle the '?'", "can't typedef a typedef'ed type.\") # For the case that the resolved type", "# http://www.w3.org/TR/WebIDL/#idl-types # http://www.w3.org/TR/WebIDL/#idl-interface # In C++ these are RefPtr types. return not", "= state['value_type'] def idl_types(self): yield self for idl_type in self.key_type.idl_types(): yield idl_type for", "enum parsing code. inner_values = self.inner_type.enum_values if inner_values: return [None] + inner_values return", "self.key_type.idl_types(): yield idl_type for idl_type in self.value_type.idl_types(): yield idl_type def resolve_typedefs(self, typedefs): self.key_type", "type name, # simplifying its signature. # return str(self.inner_type) + '?' return str(self.inner_type)", "attributes. [Clamp], [EnforceRange], [StringContext], and [TreatNullAs] are applicable to types. https://heycam.github.io/webidl/#idl-annotated-types \"\"\" def", "return self @property def is_record_type(self): return True @property def name(self): return self.key_type.name +", "methods because they are stored # in sets. 
def __init__(self, member_types): super(IdlUnionType, self).__init__()", "Jinja variables). # This allows us to not define default properties in the", "% self) if ('StringContext' in extended_attributes and inner_type.base_type not in ['DOMString', 'USVString']): raise", "cls.callback_interfaces.update(new_callback_interfaces) @classmethod def set_dictionaries(cls, new_dictionaries): cls.dictionaries.update(new_dictionaries) @classmethod def set_enums(cls, new_enums): cls.enums.update(new_enums) def resolve_typedefs(self,", "def numeric_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.is_numeric_type) @property def boolean_member_type(self): return self.single_matching_member_type( lambda", "'Object', } STRING_TYPES = frozenset([ # http://heycam.github.io/webidl/#es-interface-call (step 10.11) # (Interface object [[Call]]", "they are stored # in sets. def __init__(self, member_types): super(IdlUnionType, self).__init__() self.member_types =", "True @property def has_string_context(self): return 'StringContext' in self.extended_attributes @property def name(self): annotation =", "raise NotImplementedError( 'resolve_typedefs should be defined in subclasses') def idl_types(self): \"\"\"A generator which", "or self.name == 'Any' or self.name == 'Object' or self.name == 'Promise' )", "set_dictionaries(cls, new_dictionaries): cls.dictionaries.update(new_dictionaries) @classmethod def set_enums(cls, new_enums): cls.enums.update(new_enums) def resolve_typedefs(self, typedefs): base_type =", "FIXME: Dictionary::ConversionContext::setConversionType can't # handle the '?' in nullable types (passes nullability separately).", "to handle nullability from the type name, # simplifying its signature. # return", "+ ('' if val is None else '=' + val)) for key, val", "'long': 'Long', 'unsigned long': 'UnsignedLong', 'long long': 'LongLong', 'unsigned long long': 'UnsignedLongLong', 'float':", "is ambiguous.' 
% self.name) return matching_types[0] if matching_types else None @property def string_member_type(self):", "else: self.base_type = base_type def __str__(self): return self.base_type def __getstate__(self): return { 'base_type':", "@property def is_numeric_type(self): return self.base_type in NUMERIC_TYPES @property def is_primitive_type(self): return self.base_type in", "which yields IdlTypes which are referenced from |self|, including itself.\"\"\" yield self ################################################################################", "' or '.join( str(member_type) for member_type in self.member_types) + ')' def __hash__(self): return", "return self.name == rhs.name def __getstate__(self): return { 'member_types': self.member_types, } def __setstate__(self,", "any.') if inner_type.name == 'Promise': raise ValueError( 'Inner type of nullable type must", "= set() enums = {} # name -> values def __init__(self, base_type, is_unrestricted=False):", "return '[%s] %s' % (annotation, str(self.inner_type)) def __getattr__(self, name): return getattr(self.inner_type, name) def", "count = 0 for member in self.member_types: if member.is_nullable: count += 1 member", "str(self.inner_type)) def __getattr__(self, name): return getattr(self.inner_type, name) def __getstate__(self): return { 'inner_type': self.inner_type,", "self.value_type.idl_types(): yield idl_type def resolve_typedefs(self, typedefs): self.key_type = self.key_type.resolve_typedefs(typedefs) self.value_type = self.value_type.resolve_typedefs(typedefs) return", "annotation = ''.join( (key + ('' if val is None else val)) for", "a dictionary type as its members.') self.inner_type = inner_type def __str__(self): # FIXME:", "self.key_type, 'value_type': self.value_type, } def __setstate__(self, state): self.key_type = state['key_type'] self.value_type = state['value_type']", "type contains other typedef'ed # type(s). 
return resolved_type.resolve_typedefs(typedefs) return self ################################################################################ # IdlUnionType", "the LICENSE file. \"\"\"IDL type handling. Classes: IdlTypeBase IdlType IdlUnionType IdlArrayOrSequenceType IdlSequenceType IdlFrozenArrayType", "self.base_type in NUMERIC_TYPES @property def is_primitive_type(self): return self.base_type in PRIMITIVE_TYPES @property def is_interface_type(self):", "return { 'inner_type': self.inner_type, } def __setstate__(self, state): self.inner_type = state['inner_type'] @property def", "= {} for member in self.member_types: if member.is_nullable: member = member.inner_type if member.is_union_type:", "if len(matching_types) > 1: raise ValueError('%s is ambiguous.' % self.name) return matching_types[0] if", "dictionaries = set() enums = {} # name -> values def __init__(self, base_type,", "__hash__() and __eq__() methods because they are stored # in sets. def __init__(self,", "key, val in self.extended_attributes.items()) return '[%s] %s' % (annotation, str(self.inner_type)) def __getattr__(self, name):", "\"\"\"Returns the set of the union's flattened member types. https://heycam.github.io/webidl/#dfn-flattened-union-member-types \"\"\" # We", "attributes not applicable to types: %s' % self) if ('StringContext' in extended_attributes and", "if member.is_union_type: for inner_member in member.flattened_member_types: flattened_members[inner_member.name] = inner_member else: flattened_members[member.name] = member", "an interface type. 
# http://www.w3.org/TR/WebIDL/#idl-types # http://www.w3.org/TR/WebIDL/#idl-interface # In C++ these are RefPtr", "inner_type, extended_attributes): super(IdlAnnotatedType, self).__init__() self.inner_type = inner_type self.extended_attributes = extended_attributes if any(key not", "self.is_dictionary or self.is_enum or self.name == 'Any' or self.name == 'Object' or self.name", "'any': 'Any', 'boolean': 'Boolean', 'byte': 'Byte', 'octet': 'Octet', 'short': 'Short', 'unsigned short': 'UnsignedShort',", "'boolean') @property def sequence_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.is_sequence_type) @property def dictionary_member_type(self): return", "in self.key_type.idl_types(): yield idl_type for idl_type in self.value_type.idl_types(): yield idl_type def resolve_typedefs(self, typedefs):", "entry.get('callback_function') if not callback_function: return False return 'Custom' in callback_function.extended_attributes @property def is_callback_interface(self):", "'DOMString', 'ByteString', 'USVString', # http://heycam.github.io/webidl/#idl-types 'void', ])) TYPE_NAMES = { # http://heycam.github.io/webidl/#dfn-type-name 'any':", "IdlTypes which are referenced from |self|, including itself.\"\"\" yield self ################################################################################ # IdlType", "BSD-style license that can be # found in the LICENSE file. 
\"\"\"IDL type", "in self.value_type.idl_types(): yield idl_type def resolve_typedefs(self, typedefs): self.key_type = self.key_type.resolve_typedefs(typedefs) self.value_type = self.value_type.resolve_typedefs(typedefs)", "for key, val in self.extended_attributes.items()) return '[%s] %s' % (annotation, str(self.inner_type)) def __getattr__(self,", "yield self for idl_type in self.inner_type.idl_types(): yield idl_type ################################################################################ # IdlAnnotatedType ################################################################################ class", "= { # http://heycam.github.io/webidl/#dfn-type-name 'any': 'Any', 'boolean': 'Boolean', 'byte': 'Byte', 'octet': 'Octet', 'short':", "IdlUnionType, IdlArrayOrSequenceType and IdlNullableType. \"\"\" def __str__(self): raise NotImplementedError('__str__() should be defined in", "( INTEGER_TYPES | frozenset([ # http://www.w3.org/TR/WebIDL/#dfn-numeric-type 'float', 'unrestricted float', 'double', 'unrestricted double', ]))", "= 'unrestricted %s' % base_type else: self.base_type = base_type def __str__(self): return self.base_type", "'(' + ' or '.join( str(member_type) for member_type in self.member_types) + ')' def", "raise ValueError('%s is ambiguous.' % self.name) return matching_types[0] if matching_types else None @property", "this source code is governed by a BSD-style license that can be #", "in interfaces_info. \"\"\" from collections import defaultdict ################################################################################ # IDL types ################################################################################ INTEGER_TYPES", "entry = IdlType.callback_functions.get(self.base_type) callback_function = entry.get('callback_function') if not callback_function: return False return 'Custom'", "# type(s). 
return resolved_type.resolve_typedefs(typedefs) return self ################################################################################ # IdlUnionType ################################################################################ class IdlUnionType(IdlTypeBase): #", "dictionary type as its members.') self.inner_type = inner_type def __str__(self): # FIXME: Dictionary::ConversionContext::setConversionType", "raise ValueError( 'StringContext is only applicable to string types.') def __str__(self): annotation =", "self.element_type.name + 'Sequence' @property def is_sequence_type(self): return True class IdlFrozenArrayType(IdlArrayOrSequenceType): def __init__(self, element_type):", "is_sequence_type(self): return True class IdlFrozenArrayType(IdlArrayOrSequenceType): def __init__(self, element_type): super(IdlFrozenArrayType, self).__init__(element_type) def __str__(self): return", "self.value_type = self.value_type.resolve_typedefs(typedefs) return self @property def is_record_type(self): return True @property def name(self):", "base_type) @classmethod def set_callback_functions(cls, new_callback_functions): cls.callback_functions.update(new_callback_functions) @classmethod def set_callback_interfaces(cls, new_callback_interfaces): cls.callback_interfaces.update(new_callback_interfaces) @classmethod def", "(key + ('' if val is None else val)) for key, val in", "'ByteString': 'ByteString', 'USVString': 'USVString', 'object': 'Object', } STRING_TYPES = frozenset([ # http://heycam.github.io/webidl/#es-interface-call (step", "are applicable to types. https://heycam.github.io/webidl/#idl-annotated-types \"\"\" def __init__(self, inner_type, extended_attributes): super(IdlAnnotatedType, self).__init__() self.inner_type", "Default undefined attributes to None (analogous to Jinja variables). 
# This allows us", "'base_type': self.base_type, } def __setstate__(self, state): self.base_type = state['base_type'] @property def is_basic_type(self): return", "IdlType IdlUnionType IdlArrayOrSequenceType IdlSequenceType IdlFrozenArrayType IdlNullableType IdlAnnotatedType IdlTypes are picklable because we store", "nullability separately). # Update that function to handle nullability from the type name,", "def is_void(self): return self.base_type == 'void' @property def is_numeric_type(self): return self.base_type in NUMERIC_TYPES", "member_type in self.member_types ] return self def idl_types(self): yield self for member_type in", "class IdlFrozenArrayType(IdlArrayOrSequenceType): def __init__(self, element_type): super(IdlFrozenArrayType, self).__init__(element_type) def __str__(self): return 'FrozenArray<%s>' % self.element_type", "enums are handled by preprending a None value to the list of #", "-> values def __init__(self, base_type, is_unrestricted=False): super(IdlType, self).__init__() if is_unrestricted: self.base_type = 'unrestricted", "interface type. # http://www.w3.org/TR/WebIDL/#idl-types # http://www.w3.org/TR/WebIDL/#idl-interface # In C++ these are RefPtr types.", "else: flattened_members[member.name] = member return set(flattened_members.values()) @property def number_of_nullable_member_types(self): \"\"\"Returns the union's number", "def flattened_member_types(self): \"\"\"Returns the set of the union's flattened member types. 
https://heycam.github.io/webidl/#dfn-flattened-union-member-types \"\"\"", "union type that ' 'itself includes a nullable type.') if any(member.is_dictionary for member", "is_unrestricted=False): super(IdlType, self).__init__() if is_unrestricted: self.base_type = 'unrestricted %s' % base_type else: self.base_type", "enum_values(self): return IdlType.enums.get(self.name) @property def enum_type(self): return self.name if self.is_enum else None @property", "is_sequence_type(self): return False @property def is_frozen_array(self): return False @property def enum_values(self): return self.element_type.enum_values", "self.name == 'Object' or self.name == 'Promise' ) # Promise will be basic", "Update that function to handle nullability from the type name, # simplifying its", "IdlSequenceType(IdlArrayOrSequenceType): def __init__(self, element_type): super(IdlSequenceType, self).__init__(element_type) def __str__(self): return 'sequence<%s>' % self.element_type @property", "super(IdlUnionType, self).__init__() self.member_types = member_types def __str__(self): return '(' + ' or '.join(", "__setstate__(self, state): self.element_type = state['element_type'] def resolve_typedefs(self, typedefs): self.element_type = self.element_type.resolve_typedefs(typedefs) return self", "Inheritance ################################################################################ ancestors = defaultdict(list) # interface_name -> ancestors def inherits_interface(interface_name, ancestor_name): return", "In C++ these are RefPtr types. return not (self.is_basic_type or self.is_callback_function or self.is_dictionary", "can be # found in the LICENSE file. \"\"\"IDL type handling. Classes: IdlTypeBase", "double', ])) # http://www.w3.org/TR/WebIDL/#dfn-primitive-type PRIMITIVE_TYPES = (frozenset(['boolean']) | NUMERIC_TYPES) BASIC_TYPES = ( PRIMITIVE_TYPES", "that is not another type is an interface type. 
# http://www.w3.org/TR/WebIDL/#idl-types # http://www.w3.org/TR/WebIDL/#idl-interface", "= state['inner_type'] self.extended_attributes = state['extended_attributes'] @property def is_annotated_type(self): return True @property def has_string_context(self):", "name(self): return self.element_type.name + 'Array' @property def is_frozen_array(self): return True ################################################################################ # IdlRecordType", "yield self ################################################################################ # IdlType ################################################################################ class IdlType(IdlTypeBase): # FIXME: incorporate Nullable, etc.", "name(self): \"\"\"Return type name http://heycam.github.io/webidl/#dfn-type-name \"\"\" base_type = self.base_type return TYPE_NAMES.get(base_type, base_type) @classmethod", "because each member is an # IdlTypeBase-derived class, and comparing two objects of", "= self.key_type.resolve_typedefs(typedefs) self.value_type = self.value_type.resolve_typedefs(typedefs) return self @property def is_record_type(self): return True @property", "the same as comparing their names. # In other words: # x =", "0: raise ValueError( 'Inner type of nullable type must not be a union", "is an interface type. 
# http://www.w3.org/TR/WebIDL/#idl-types # http://www.w3.org/TR/WebIDL/#idl-interface # In C++ these are", "# http://heycam.github.io/webidl/#idl-types 'void', ])) TYPE_NAMES = { # http://heycam.github.io/webidl/#dfn-type-name 'any': 'Any', 'boolean': 'Boolean',", "flattened_members = {} for member in self.member_types: if member.is_nullable: member = member.inner_type if", "None else val)) for key, val in sorted(self.extended_attributes.items())) return self.inner_type.name + annotation def", "self.inner_type = self.inner_type.resolve_typedefs(typedefs) return self def idl_types(self): yield self for idl_type in self.inner_type.idl_types():", "inner_type self.extended_attributes = extended_attributes if any(key not in EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES for key in extended_attributes):", "|self|, including itself.\"\"\" yield self ################################################################################ # IdlType ################################################################################ class IdlType(IdlTypeBase): # FIXME:", "matching_types[0] if matching_types else None @property def string_member_type(self): return self.single_matching_member_type( lambda member_type: (member_type.is_string_type", "self.flattened_member_types)) if len(matching_types) > 1: raise ValueError('%s is ambiguous.' % self.name) return matching_types[0]", "################################################################################ class IdlType(IdlTypeBase): # FIXME: incorporate Nullable, etc. 
# to support types like", "################################################################################ # IdlNullableType ################################################################################ # https://heycam.github.io/webidl/#idl-nullable-type class IdlNullableType(IdlTypeBase): def __init__(self, inner_type): super(IdlNullableType, self).__init__()", "typedefs: raise ValueError(\"We can't typedef a typedef'ed type.\") # For the case that", "x = IdlType('ByteString') # y = IdlType('ByteString') # x == y # False", "http://heycam.github.io/webidl/#dfn-number-of-nullable-member-types \"\"\" count = 0 for member in self.member_types: if member.is_nullable: count +=", "\"\"\" from collections import defaultdict ################################################################################ # IDL types ################################################################################ INTEGER_TYPES = frozenset([", "def is_annotated_type(self): return True @property def has_string_context(self): return 'StringContext' in self.extended_attributes @property def", "and # allows us to relay __getattr__ in IdlNullableType to the inner type.", "################################################################################ class IdlAnnotatedType(IdlTypeBase): \"\"\"IdlAnnoatedType represents an IDL type with extended attributes. [Clamp], [EnforceRange],", "# In C++ these are RefPtr types. 
return not (self.is_basic_type or self.is_callback_function or", "| NUMERIC_TYPES) BASIC_TYPES = ( PRIMITIVE_TYPES | frozenset([ # Built-in, non-composite, non-object data", "return False @property def is_frozen_array(self): return False @property def enum_values(self): return self.element_type.enum_values @property", "self.inner_type, } def __setstate__(self, state): self.inner_type = state['inner_type'] @property def is_nullable(self): return True", "def __str__(self): annotation = ', '.join( (key + ('' if val is None", "value_type def __str__(self): return 'record<%s, %s>' % (self.key_type, self.value_type) def __getstate__(self): return {", "nullability from the type name, # simplifying its signature. # return str(self.inner_type) +", "# x.name == y.name # True # |flattened_members|'s keys are type names, the", "__setstate__(self, state): self.key_type = state['key_type'] self.value_type = state['value_type'] def idl_types(self): yield self for", "types.) 'String', 'ByteString', 'USVString', ]) EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES = frozenset([ 'AllowShared', 'Clamp', 'EnforceRange', 'StringContext', 'TreatNullAs',", "us to not define default properties in the base class, and # allows", "+ annotation def resolve_typedefs(self, typedefs): self.inner_type = self.inner_type.resolve_typedefs(typedefs) return self def idl_types(self): yield", "def __init__(self, member_types): super(IdlUnionType, self).__init__() self.member_types = member_types def __str__(self): return '(' +", "self.inner_type = state['inner_type'] self.extended_attributes = state['extended_attributes'] @property def is_annotated_type(self): return True @property def", "= typedefs[base_type] if resolved_type.base_type in typedefs: raise ValueError(\"We can't typedef a typedef'ed type.\")", "member_type: member_type.is_dictionary) @property def as_union_type(self): # Note: Use this to \"look through\" a", "@property def is_array_or_sequence_type(self): return True @property def 
is_sequence_type(self): return False @property def is_frozen_array(self):", "enum_values(self): # Nullable enums are handled by preprending a None value to the", "'?' in nullable types (passes nullability separately). # Update that function to handle", "# in sets. def __init__(self, member_types): super(IdlUnionType, self).__init__() self.member_types = member_types def __str__(self):", "is_callback_interface(self): return self.base_type in IdlType.callback_interfaces @property def is_dictionary(self): return self.base_type in IdlType.dictionaries @property", "This allows us to not define default properties in the base class, and", "self @property def is_record_type(self): return True @property def name(self): return self.key_type.name + self.value_type.name", "% self.element_type @property def name(self): return self.element_type.name + 'Sequence' @property def is_sequence_type(self): return", "2014 The Chromium Authors. All rights reserved. # Use of this source code", "defaultdict(list) # interface_name -> ancestors def inherits_interface(interface_name, ancestor_name): return (interface_name == ancestor_name or", "self.member_types, } def __setstate__(self, state): self.member_types = state['member_types'] @property def flattened_member_types(self): \"\"\"Returns the", "value to the list of # enum values. This None value is converted", "IdlAnnotatedType IdlTypes are picklable because we store them in interfaces_info. \"\"\" from collections", "and __eq__() methods because they are stored # in sets. 
def __init__(self, member_types):", "state): self.member_types = state['member_types'] @property def flattened_member_types(self): \"\"\"Returns the set of the union's", "inner_type.flattened_member_types): raise ValueError( 'Inner type of nullable type must not be a union", "self.single_matching_member_type( lambda member_type: member_type.base_type == 'boolean') @property def sequence_member_type(self): return self.single_matching_member_type( lambda member_type:", "def __setstate__(self, state): self.member_types = state['member_types'] @property def flattened_member_types(self): \"\"\"Returns the set of", "member return set(flattened_members.values()) @property def number_of_nullable_member_types(self): \"\"\"Returns the union's number of nullable types.", "is only applicable to string types.') def __str__(self): annotation = ', '.join( (key", "'USVString', 'object': 'Object', } STRING_TYPES = frozenset([ # http://heycam.github.io/webidl/#es-interface-call (step 10.11) # (Interface", "raise ValueError( 'Inner type of nullable type must not be a nullable type.')", "class for IdlType, IdlUnionType, IdlArrayOrSequenceType and IdlNullableType. \"\"\" def __str__(self): raise NotImplementedError('__str__() should", "EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES = frozenset([ 'AllowShared', 'Clamp', 'EnforceRange', 'StringContext', 'TreatNullAs', ]) ################################################################################ # Inheritance ################################################################################", "member.is_union_type: for inner_member in member.flattened_member_types: flattened_members[inner_member.name] = inner_member else: flattened_members[member.name] = member return", "Dictionary::ConversionContext::setConversionType can't # handle the '?' in nullable types (passes nullability separately). #", "type(s). 
return resolved_type.resolve_typedefs(typedefs) return self ################################################################################ # IdlUnionType ################################################################################ class IdlUnionType(IdlTypeBase): # http://heycam.github.io/webidl/#idl-union", "defined in subclasses') def idl_types(self): \"\"\"A generator which yields IdlTypes which are referenced", "def __str__(self): return self.base_type def __getstate__(self): return { 'base_type': self.base_type, } def __setstate__(self,", "class IdlSequenceType(IdlArrayOrSequenceType): def __init__(self, element_type): super(IdlSequenceType, self).__init__(element_type) def __str__(self): return 'sequence<%s>' % self.element_type", "@property def string_member_type(self): return self.single_matching_member_type( lambda member_type: (member_type.is_string_type or member_type.is_enum) ) @property def", "str(member_type) for member_type in self.member_types) + ')' def __hash__(self): return hash(self.name) def __eq__(self,", "each member is an # IdlTypeBase-derived class, and comparing two objects of the", "in member.flattened_member_types: flattened_members[inner_member.name] = inner_member else: flattened_members[member.name] = member return set(flattened_members.values()) @property def", "IdlType.callback_functions.get(self.base_type) callback_function = entry.get('callback_function') if not callback_function: return False return 'Custom' in callback_function.extended_attributes", "must not be any.') if inner_type.name == 'Promise': raise ValueError( 'Inner type of", "function to handle nullability from the type name, # simplifying its signature. 
#", "def idl_types(self): yield self for idl_type in self.element_type.idl_types(): yield idl_type class IdlSequenceType(IdlArrayOrSequenceType): def", "'Sequence' @property def is_sequence_type(self): return True class IdlFrozenArrayType(IdlArrayOrSequenceType): def __init__(self, element_type): super(IdlFrozenArrayType, self).__init__(element_type)", "of nullable type must not be any.') if inner_type.name == 'Promise': raise ValueError(", "enum_values(self): return self.element_type.enum_values @property def enum_type(self): return self.element_type.enum_type def idl_types(self): yield self for", "name(self): return self.inner_type.name + 'OrNull' @property def enum_values(self): # Nullable enums are handled", "state['base_type'] @property def is_basic_type(self): return self.base_type in BASIC_TYPES @property def is_callback_function(self): # pylint:", "self def idl_types(self): yield self for member_type in self.member_types: for idl_type in member_type.idl_types():", "def as_union_type(self): # Note: Use this to \"look through\" a possible IdlNullableType wrapper.", "\"\"\"Base class for array-like types.\"\"\" def __init__(self, element_type): super(IdlArrayOrSequenceType, self).__init__() self.element_type = element_type", "# IdlUnionType ################################################################################ class IdlUnionType(IdlTypeBase): # http://heycam.github.io/webidl/#idl-union # IdlUnionType has __hash__() and __eq__()", "# enum values. 
This None value is converted to nullptr on the C++", "'.join( str(member_type) for member_type in self.member_types) + ')' def __hash__(self): return hash(self.name) def", "idl_type class IdlSequenceType(IdlArrayOrSequenceType): def __init__(self, element_type): super(IdlSequenceType, self).__init__(element_type) def __str__(self): return 'sequence<%s>' %", "################################################################################ # https://heycam.github.io/webidl/#idl-nullable-type class IdlNullableType(IdlTypeBase): def __init__(self, inner_type): super(IdlNullableType, self).__init__() if inner_type.name ==", "= key_type self.value_type = value_type def __str__(self): return 'record<%s, %s>' % (self.key_type, self.value_type)", "@property def name(self): return self.element_type.name + 'Array' @property def is_frozen_array(self): return True ################################################################################", "not be a union type that ' 'itself includes a nullable type.') if", "= state['member_types'] @property def flattened_member_types(self): \"\"\"Returns the set of the union's flattened member", "return self.inner_type.name + 'OrNull' @property def enum_values(self): # Nullable enums are handled by", "if inner_type.number_of_nullable_member_types > 0: raise ValueError( 'Inner type of nullable type must not", "= state['base_type'] @property def is_basic_type(self): return self.base_type in BASIC_TYPES @property def is_callback_function(self): #", "# http://www.w3.org/TR/WebIDL/#dfn-primitive-type PRIMITIVE_TYPES = (frozenset(['boolean']) | NUMERIC_TYPES) BASIC_TYPES = ( PRIMITIVE_TYPES | frozenset([", "return hash(self.name) def __eq__(self, rhs): return self.name == rhs.name def __getstate__(self): return {", "count += 1 member = member.inner_type if member.is_union_type: count += member.number_of_nullable_member_types return count", "yield self for member_type in self.member_types: for idl_type in member_type.idl_types(): yield idl_type 
################################################################################", "'unsigned short', # int and unsigned are not IDL types 'long', 'unsigned long',", "def is_sequence_type(self): return True class IdlFrozenArrayType(IdlArrayOrSequenceType): def __init__(self, element_type): super(IdlFrozenArrayType, self).__init__(element_type) def __str__(self):", "def is_frozen_array(self): return False @property def enum_values(self): return self.element_type.enum_values @property def enum_type(self): return", "parsing code. inner_values = self.inner_type.enum_values if inner_values: return [None] + inner_values return None", "enums = {} # name -> values def __init__(self, base_type, is_unrestricted=False): super(IdlType, self).__init__()", "long', ]) NUMERIC_TYPES = ( INTEGER_TYPES | frozenset([ # http://www.w3.org/TR/WebIDL/#dfn-numeric-type 'float', 'unrestricted float',", "@property def as_union_type(self): # Note: Use this to \"look through\" a possible IdlNullableType", "long', 'long long', 'unsigned long long', ]) NUMERIC_TYPES = ( INTEGER_TYPES | frozenset([", "self.is_enum else None @property def is_integer_type(self): return self.base_type in INTEGER_TYPES @property def is_void(self):", "False @property def is_frozen_array(self): return False @property def enum_values(self): return self.element_type.enum_values @property def", "not in EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES for key in extended_attributes): raise ValueError( 'Extended attributes not applicable", "# FIXME: add an IdlEnumType class and a resolve_enums step # at end", "in IdlNullableType to the inner type. 
return None def resolve_typedefs(self, typedefs): raise NotImplementedError(", "################################################################################ # IdlRecordType ################################################################################ class IdlRecordType(IdlTypeBase): def __init__(self, key_type, value_type): super(IdlRecordType, self).__init__() self.key_type", "in BASIC_TYPES @property def is_callback_function(self): # pylint: disable=C0103 return self.base_type in IdlType.callback_functions @property", "self.name) return matching_types[0] if matching_types else None @property def string_member_type(self): return self.single_matching_member_type( lambda", "self for idl_type in self.element_type.idl_types(): yield idl_type class IdlSequenceType(IdlArrayOrSequenceType): def __init__(self, element_type): super(IdlSequenceType,", "self.base_type in INTEGER_TYPES @property def is_void(self): return self.base_type == 'void' @property def is_numeric_type(self):", "IdlFrozenArrayType(IdlArrayOrSequenceType): def __init__(self, element_type): super(IdlFrozenArrayType, self).__init__(element_type) def __str__(self): return 'FrozenArray<%s>' % self.element_type @property", "\"IdlArrayTypeBase\" or something. 
class IdlArrayOrSequenceType(IdlTypeBase): \"\"\"Base class for array-like types.\"\"\" def __init__(self, element_type):", "def __getattr__(self, name): return getattr(self.inner_type, name) def __getstate__(self): return { 'inner_type': self.inner_type, }", "''.join( (key + ('' if val is None else val)) for key, val", "\"\"\" def __init__(self, inner_type, extended_attributes): super(IdlAnnotatedType, self).__init__() self.inner_type = inner_type self.extended_attributes = extended_attributes", "or self.is_enum or self.name == 'Any' or self.name == 'Object' or self.name ==", "'Inner type of nullable type must not be a promise.') if inner_type.is_nullable: raise", "'short', 'unsigned short', # int and unsigned are not IDL types 'long', 'unsigned", "# simplifying its signature. # return str(self.inner_type) + '?' return str(self.inner_type) def __getattr__(self,", "name(self): \"\"\"Return type name (or inner type name if nullable) http://heycam.github.io/webidl/#dfn-type-name \"\"\" return", "################################################################################ # TODO(bashi): Rename this like \"IdlArrayTypeBase\" or something. class IdlArrayOrSequenceType(IdlTypeBase): \"\"\"Base class", "set directly because each member is an # IdlTypeBase-derived class, and comparing two", "we store them in interfaces_info. 
\"\"\" from collections import defaultdict ################################################################################ # IDL", "in subclasses') def __getattr__(self, name): # Default undefined attributes to None (analogous to", "@property def is_enum(self): # FIXME: add an IdlEnumType class and a resolve_enums step", "BASIC_TYPES @property def is_callback_function(self): # pylint: disable=C0103 return self.base_type in IdlType.callback_functions @property def", "IdlType('ByteString') # y = IdlType('ByteString') # x == y # False # x.name", "for array-like types.\"\"\" def __init__(self, element_type): super(IdlArrayOrSequenceType, self).__init__() self.element_type = element_type def __getstate__(self):", "as comparing their names. # In other words: # x = IdlType('ByteString') #", "properties in the base class, and # allows us to relay __getattr__ in", "# to support types like short?[] vs. short[]?, instead of treating these #", "matches the JavaScript 'null' in the enum parsing code. inner_values = self.inner_type.enum_values if", "def name(self): return self.element_type.name + 'Array' @property def is_frozen_array(self): return True ################################################################################ #", "None value is converted to nullptr on the C++ side, # which matches", "converted to nullptr on the C++ side, # which matches the JavaScript 'null'", "def __str__(self): return 'sequence<%s>' % self.element_type @property def name(self): return self.element_type.name + 'Sequence'", "assume we can use two IDL objects of the same type interchangeably. 
flattened_members", "'itself includes a nullable type.') if any(member.is_dictionary for member in inner_type.flattened_member_types): raise ValueError(", "member.inner_type if member.is_union_type: count += member.number_of_nullable_member_types return count @property def is_union_type(self): return True", "defaultdict ################################################################################ # IDL types ################################################################################ INTEGER_TYPES = frozenset([ # http://www.w3.org/TR/WebIDL/#dfn-integer-type 'byte', 'octet',", "self.value_type) def __getstate__(self): return { 'key_type': self.key_type, 'value_type': self.value_type, } def __setstate__(self, state):", "IdlArrayOrSequenceType, IdlSequenceType, IdlFrozenArrayType ################################################################################ # TODO(bashi): Rename this like \"IdlArrayTypeBase\" or something. class", "('StringContext' in extended_attributes and inner_type.base_type not in ['DOMString', 'USVString']): raise ValueError( 'StringContext is", "sorted(self.extended_attributes.items())) return self.inner_type.name + annotation def resolve_typedefs(self, typedefs): self.inner_type = self.inner_type.resolve_typedefs(typedefs) return self", "'unrestricted float': 'UnrestrictedFloat', 'double': 'Double', 'unrestricted double': 'UnrestrictedDouble', 'DOMString': 'String', 'ByteString': 'ByteString', 'USVString':", "directly because each member is an # IdlTypeBase-derived class, and comparing two objects", "def set_enums(cls, new_enums): cls.enums.update(new_enums) def resolve_typedefs(self, typedefs): base_type = self.base_type if base_type in", "inner_type.is_union_type: if inner_type.number_of_nullable_member_types > 0: raise ValueError( 'Inner type of nullable type must", "def __getattr__(self, name): return getattr(self.inner_type, name) def __getstate__(self): return { 'inner_type': self.inner_type, 'extended_attributes':", 
"'ByteString', 'USVString', # http://heycam.github.io/webidl/#idl-types 'void', ])) TYPE_NAMES = { # http://heycam.github.io/webidl/#dfn-type-name 'any': 'Any',", "def is_frozen_array(self): return True ################################################################################ # IdlRecordType ################################################################################ class IdlRecordType(IdlTypeBase): def __init__(self, key_type,", "return self @property def is_array_or_sequence_type(self): return True @property def is_sequence_type(self): return False @property", "self.element_type @property def name(self): return self.element_type.name + 'Sequence' @property def is_sequence_type(self): return True", "self) if ('StringContext' in extended_attributes and inner_type.base_type not in ['DOMString', 'USVString']): raise ValueError(", "self.value_type = value_type def __str__(self): return 'record<%s, %s>' % (self.key_type, self.value_type) def __getstate__(self):", "return 'sequence<%s>' % self.element_type @property def name(self): return self.element_type.name + 'Sequence' @property def", "'unsigned long', 'long long', 'unsigned long long', ]) NUMERIC_TYPES = ( INTEGER_TYPES |", "add an IdlEnumType class and a resolve_enums step # at end of IdlDefinitions", "def __init__(self, inner_type): super(IdlNullableType, self).__init__() if inner_type.name == 'Any': raise ValueError('Inner type of", "member_type: member_type.base_type == 'boolean') @property def sequence_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.is_sequence_type) @property", "__getattr__(self, name): # Default undefined attributes to None (analogous to Jinja variables). #", "if member.is_nullable: member = member.inner_type if member.is_union_type: for inner_member in member.flattened_member_types: flattened_members[inner_member.name] =", "str(self.inner_type) + '?' 
return str(self.inner_type) def __getattr__(self, name): return getattr(self.inner_type, name) def __getstate__(self):", "annotation = ', '.join( (key + ('' if val is None else '='", "'Double', 'unrestricted double': 'UnrestrictedDouble', 'DOMString': 'String', 'ByteString': 'ByteString', 'USVString': 'USVString', 'object': 'Object', }", "type of nullable type must not be a nullable type.') if inner_type.is_union_type: if", "= self.base_type return TYPE_NAMES.get(base_type, base_type) @classmethod def set_callback_functions(cls, new_callback_functions): cls.callback_functions.update(new_callback_functions) @classmethod def set_callback_interfaces(cls,", "these # as orthogonal properties (via flags). callback_functions = {} callback_interfaces = set()", "This None value is converted to nullptr on the C++ side, # which", "class for array-like types.\"\"\" def __init__(self, element_type): super(IdlArrayOrSequenceType, self).__init__() self.element_type = element_type def", "'byte', 'octet', 'short', 'unsigned short', # int and unsigned are not IDL types", "'UnsignedLongLong', 'float': 'Float', 'unrestricted float': 'UnrestrictedFloat', 'double': 'Double', 'unrestricted double': 'UnrestrictedDouble', 'DOMString': 'String',", "its members.') self.inner_type = inner_type def __str__(self): # FIXME: Dictionary::ConversionContext::setConversionType can't # handle", "= frozenset([ # http://heycam.github.io/webidl/#es-interface-call (step 10.11) # (Interface object [[Call]] method's string types.)", "self.element_type = element_type def __getstate__(self): return { 'element_type': self.element_type, } def __setstate__(self, state):", "interfaces_info. \"\"\" from collections import defaultdict ################################################################################ # IDL types ################################################################################ INTEGER_TYPES =", "|flattened_members|'s keys are type names, the values are type # |objects|. 
# We", "IdlNullableType. \"\"\" def __str__(self): raise NotImplementedError('__str__() should be defined in subclasses') def __getattr__(self,", "__setstate__(self, state): self.member_types = state['member_types'] @property def flattened_member_types(self): \"\"\"Returns the set of the", "def __getattr__(self, name): # Default undefined attributes to None (analogous to Jinja variables).", "member in self.member_types: if member.is_nullable: member = member.inner_type if member.is_union_type: for inner_member in", "if inner_type.is_nullable: raise ValueError( 'Inner type of nullable type must not be a", "We assume we can use two IDL objects of the same type interchangeably.", "inner type name if nullable) http://heycam.github.io/webidl/#dfn-type-name \"\"\" return 'Or'.join(member_type.name for member_type in self.member_types)", "def idl_types(self): yield self for member_type in self.member_types: for idl_type in member_type.idl_types(): yield", "@property def sequence_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.is_sequence_type) @property def dictionary_member_type(self): return self.single_matching_member_type(", "'key_type': self.key_type, 'value_type': self.value_type, } def __setstate__(self, state): self.key_type = state['key_type'] self.value_type =", "ValueError( 'Inner type of nullable type must not be a nullable type.') if", "@property def name(self): return self.key_type.name + self.value_type.name + 'Record' ################################################################################ # IdlNullableType ################################################################################", "def enum_values(self): return IdlType.enums.get(self.name) @property def enum_type(self): return self.name if self.is_enum else None", "@property def is_sequence_type(self): return True class IdlFrozenArrayType(IdlArrayOrSequenceType): def __init__(self, element_type): super(IdlFrozenArrayType, 
self).__init__(element_type) def", "type as its members.') self.inner_type = inner_type def __str__(self): # FIXME: Dictionary::ConversionContext::setConversionType can't", "for member_type in self.member_types) + ')' def __hash__(self): return hash(self.name) def __eq__(self, rhs):", "self.single_matching_member_type( lambda member_type: member_type.is_sequence_type) @property def dictionary_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.is_dictionary) @property", "name(self): return self.key_type.name + self.value_type.name + 'Record' ################################################################################ # IdlNullableType ################################################################################ # https://heycam.github.io/webidl/#idl-nullable-type", "inner_type.is_nullable: raise ValueError( 'Inner type of nullable type must not be a nullable", "# (Interface object [[Call]] method's string types.) 'String', 'ByteString', 'USVString', ]) EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES =", "IdlArrayOrSequenceType(IdlTypeBase): \"\"\"Base class for array-like types.\"\"\" def __init__(self, element_type): super(IdlArrayOrSequenceType, self).__init__() self.element_type =", "base_type = self.base_type if base_type in typedefs: resolved_type = typedefs[base_type] if resolved_type.base_type in", "only applicable to string types.') def __str__(self): annotation = ', '.join( (key +", "self.base_type in IdlType.callback_interfaces @property def is_dictionary(self): return self.base_type in IdlType.dictionaries @property def is_enum(self):", "'byte': 'Byte', 'octet': 'Octet', 'short': 'Short', 'unsigned short': 'UnsignedShort', 'long': 'Long', 'unsigned long':", "return self.element_type.name + 'Array' @property def is_frozen_array(self): return True ################################################################################ # IdlRecordType 
################################################################################", "a nullable type.') if inner_type.is_union_type: if inner_type.number_of_nullable_member_types > 0: raise ValueError( 'Inner type", "if nullable) http://heycam.github.io/webidl/#dfn-type-name \"\"\" return 'Or'.join(member_type.name for member_type in self.member_types) def resolve_typedefs(self, typedefs):", "float': 'UnrestrictedFloat', 'double': 'Double', 'unrestricted double': 'UnrestrictedDouble', 'DOMString': 'String', 'ByteString': 'ByteString', 'USVString': 'USVString',", "self.extended_attributes = extended_attributes if any(key not in EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES for key in extended_attributes): raise", "'Any' or self.name == 'Object' or self.name == 'Promise' ) # Promise will", "ValueError('Inner type of nullable type must not be any.') if inner_type.name == 'Promise':", "'UnrestrictedFloat', 'double': 'Double', 'unrestricted double': 'UnrestrictedDouble', 'DOMString': 'String', 'ByteString': 'ByteString', 'USVString': 'USVString', 'object':", "return self.single_matching_member_type( lambda member_type: member_type.is_dictionary) @property def as_union_type(self): # Note: Use this to", "# IdlType ################################################################################ class IdlType(IdlTypeBase): # FIXME: incorporate Nullable, etc. 
# to support", "of nullable type must not be a nullable type.') if inner_type.is_union_type: if inner_type.number_of_nullable_member_types", "'UnrestrictedDouble', 'DOMString': 'String', 'ByteString': 'ByteString', 'USVString': 'USVString', 'object': 'Object', } STRING_TYPES = frozenset([", "inner_type.base_type not in ['DOMString', 'USVString']): raise ValueError( 'StringContext is only applicable to string", "if is_unrestricted: self.base_type = 'unrestricted %s' % base_type else: self.base_type = base_type def", "None def resolve_typedefs(self, typedefs): self.inner_type = self.inner_type.resolve_typedefs(typedefs) return self def idl_types(self): yield self", "} def __setstate__(self, state): self.element_type = state['element_type'] def resolve_typedefs(self, typedefs): self.element_type = self.element_type.resolve_typedefs(typedefs)", "comparing their names. # In other words: # x = IdlType('ByteString') # y", "element_type): super(IdlSequenceType, self).__init__(element_type) def __str__(self): return 'sequence<%s>' % self.element_type @property def name(self): return", "def __init__(self, element_type): super(IdlFrozenArrayType, self).__init__(element_type) def __str__(self): return 'FrozenArray<%s>' % self.element_type @property def", "def __setstate__(self, state): self.inner_type = state['inner_type'] @property def is_nullable(self): return True @property def", "(key + ('' if val is None else '=' + val)) for key,", "|objects|. # We assume we can use two IDL objects of the same", "handle the '?' in nullable types (passes nullability separately). 
# Update that function", "governed by a BSD-style license that can be # found in the LICENSE", "################################################################################ INTEGER_TYPES = frozenset([ # http://www.w3.org/TR/WebIDL/#dfn-integer-type 'byte', 'octet', 'short', 'unsigned short', # int", "]) EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES = frozenset([ 'AllowShared', 'Clamp', 'EnforceRange', 'StringContext', 'TreatNullAs', ]) ################################################################################ # Inheritance", "key, val in sorted(self.extended_attributes.items())) return self.inner_type.name + annotation def resolve_typedefs(self, typedefs): self.inner_type =", "def __str__(self): return '(' + ' or '.join( str(member_type) for member_type in self.member_types)", "'StringContext' in self.extended_attributes @property def name(self): annotation = ''.join( (key + ('' if", "from collections import defaultdict ################################################################################ # IDL types ################################################################################ INTEGER_TYPES = frozenset([ #", "def name(self): return self.element_type.name + 'Sequence' @property def is_sequence_type(self): return True class IdlFrozenArrayType(IdlArrayOrSequenceType):", "self.inner_type.name + 'OrNull' @property def enum_values(self): # Nullable enums are handled by preprending", "'unrestricted double': 'UnrestrictedDouble', 'DOMString': 'String', 'ByteString': 'ByteString', 'USVString': 'USVString', 'object': 'Object', } STRING_TYPES", "which are referenced from |self|, including itself.\"\"\" yield self ################################################################################ # IdlType ################################################################################", "(Interface object [[Call]] method's string types.) 
'String', 'ByteString', 'USVString', ]) EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES = frozenset([", "base_type else: self.base_type = base_type def __str__(self): return self.base_type def __getstate__(self): return {", "\"\"\"IDL type handling. Classes: IdlTypeBase IdlType IdlUnionType IdlArrayOrSequenceType IdlSequenceType IdlFrozenArrayType IdlNullableType IdlAnnotatedType IdlTypes", "[ member_type.resolve_typedefs(typedefs) for member_type in self.member_types ] return self def idl_types(self): yield self", "= ( PRIMITIVE_TYPES | frozenset([ # Built-in, non-composite, non-object data types # http://heycam.github.io/webidl/#idl-types", "for member in self.member_types: if member.is_nullable: count += 1 member = member.inner_type if", "PRIMITIVE_TYPES @property def is_interface_type(self): # Anything that is not another type is an", "'Record' ################################################################################ # IdlNullableType ################################################################################ # https://heycam.github.io/webidl/#idl-nullable-type class IdlNullableType(IdlTypeBase): def __init__(self, inner_type): super(IdlNullableType,", "return self.base_type in NUMERIC_TYPES @property def is_primitive_type(self): return self.base_type in PRIMITIVE_TYPES @property def", "= state['extended_attributes'] @property def is_annotated_type(self): return True @property def has_string_context(self): return 'StringContext' in", "type name if nullable) http://heycam.github.io/webidl/#dfn-type-name \"\"\" return 'Or'.join(member_type.name for member_type in self.member_types) def", "member.inner_type if member.is_union_type: for inner_member in member.flattened_member_types: flattened_members[inner_member.name] = inner_member else: flattened_members[member.name] =", "self).__init__() if inner_type.name == 'Any': raise ValueError('Inner type of nullable type must not", "[Clamp], [EnforceRange], [StringContext], and [TreatNullAs] are applicable to types. 
https://heycam.github.io/webidl/#idl-annotated-types \"\"\" def __init__(self,", "'FrozenArray<%s>' % self.element_type @property def name(self): return self.element_type.name + 'Array' @property def is_frozen_array(self):", "\"\"\"Return type name (or inner type name if nullable) http://heycam.github.io/webidl/#dfn-type-name \"\"\" return 'Or'.join(member_type.name", "set() enums = {} # name -> values def __init__(self, base_type, is_unrestricted=False): super(IdlType,", "{ # http://heycam.github.io/webidl/#dfn-type-name 'any': 'Any', 'boolean': 'Boolean', 'byte': 'Byte', 'octet': 'Octet', 'short': 'Short',", "{ 'inner_type': self.inner_type, 'extended_attributes': self.extended_attributes, } def __setstate__(self, state): self.inner_type = state['inner_type'] self.extended_attributes", "ancestor_name): return (interface_name == ancestor_name or ancestor_name in ancestors[interface_name]) def set_ancestors(new_ancestors): ancestors.update(new_ancestors) class", "is not another type is an interface type. # http://www.w3.org/TR/WebIDL/#idl-types # http://www.w3.org/TR/WebIDL/#idl-interface #", "def resolve_typedefs(self, typedefs): raise NotImplementedError( 'resolve_typedefs should be defined in subclasses') def idl_types(self):", "__init__(self, element_type): super(IdlSequenceType, self).__init__(element_type) def __str__(self): return 'sequence<%s>' % self.element_type @property def name(self):", "defined in subclasses') def __getattr__(self, name): # Default undefined attributes to None (analogous", "a possible IdlNullableType wrapper. 
return self @property def name(self): \"\"\"Return type name (or", "interface_name -> ancestors def inherits_interface(interface_name, ancestor_name): return (interface_name == ancestor_name or ancestor_name in", "self.member_types = [ member_type.resolve_typedefs(typedefs) for member_type in self.member_types ] return self def idl_types(self):", "@property def name(self): return self.element_type.name + 'Sequence' @property def is_sequence_type(self): return True class", "def name(self): \"\"\"Return type name http://heycam.github.io/webidl/#dfn-type-name \"\"\" base_type = self.base_type return TYPE_NAMES.get(base_type, base_type)", "member is an # IdlTypeBase-derived class, and comparing two objects of the #", "{ 'inner_type': self.inner_type, } def __setstate__(self, state): self.inner_type = state['inner_type'] @property def is_nullable(self):", "return self.name in IdlType.enums @property def enum_values(self): return IdlType.enums.get(self.name) @property def enum_type(self): return", "like short?[] vs. short[]?, instead of treating these # as orthogonal properties (via", "instead of treating these # as orthogonal properties (via flags). callback_functions = {}", "\"\"\" return 'Or'.join(member_type.name for member_type in self.member_types) def resolve_typedefs(self, typedefs): self.member_types = [", "because they are stored # in sets. 
def __init__(self, member_types): super(IdlUnionType, self).__init__() self.member_types", "resolve_typedefs(self, typedefs): base_type = self.base_type if base_type in typedefs: resolved_type = typedefs[base_type] if", "= inner_type self.extended_attributes = extended_attributes if any(key not in EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES for key in", "cls.callback_functions.update(new_callback_functions) @classmethod def set_callback_interfaces(cls, new_callback_interfaces): cls.callback_interfaces.update(new_callback_interfaces) @classmethod def set_dictionaries(cls, new_dictionaries): cls.dictionaries.update(new_dictionaries) @classmethod def", "raise ValueError( 'Inner type of nullable type must not be a union type", "type must not be a union type that ' 'has a dictionary type", "member_type.is_dictionary) @property def as_union_type(self): # Note: Use this to \"look through\" a possible", "at end of IdlDefinitions constructor return self.name in IdlType.enums @property def enum_values(self): return", "Note: Use this to \"look through\" a possible IdlNullableType wrapper. return self @property", "__str__(self): return 'FrozenArray<%s>' % self.element_type @property def name(self): return self.element_type.name + 'Array' @property", "string_member_type(self): return self.single_matching_member_type( lambda member_type: (member_type.is_string_type or member_type.is_enum) ) @property def numeric_member_type(self): return", "type name (or inner type name if nullable) http://heycam.github.io/webidl/#dfn-type-name \"\"\" return 'Or'.join(member_type.name for", "if member.is_union_type: count += member.number_of_nullable_member_types return count @property def is_union_type(self): return True def", "The Chromium Authors. All rights reserved. 
# Use of this source code is", "is_record_type(self): return True @property def name(self): return self.key_type.name + self.value_type.name + 'Record' ################################################################################", "includes a nullable type.') if any(member.is_dictionary for member in inner_type.flattened_member_types): raise ValueError( 'Inner", "'inner_type': self.inner_type, 'extended_attributes': self.extended_attributes, } def __setstate__(self, state): self.inner_type = state['inner_type'] self.extended_attributes =", "IdlType.enums @property def enum_values(self): return IdlType.enums.get(self.name) @property def enum_type(self): return self.name if self.is_enum", "= self.inner_type.enum_values if inner_values: return [None] + inner_values return None def resolve_typedefs(self, typedefs):", "typedef'ed type.\") # For the case that the resolved type contains other typedef'ed", "= ( INTEGER_TYPES | frozenset([ # http://www.w3.org/TR/WebIDL/#dfn-numeric-type 'float', 'unrestricted float', 'double', 'unrestricted double',", "a union type that ' 'has a dictionary type as its members.') self.inner_type", "TODO(bashi): Rename this like \"IdlArrayTypeBase\" or something. class IdlArrayOrSequenceType(IdlTypeBase): \"\"\"Base class for array-like", "key in extended_attributes): raise ValueError( 'Extended attributes not applicable to types: %s' %", "# Promise will be basic in future @property def is_string_type(self): return self.name in", "type # |objects|. # We assume we can use two IDL objects of", "raise ValueError('Inner type of nullable type must not be any.') if inner_type.name ==", "ValueError( 'Extended attributes not applicable to types: %s' % self) if ('StringContext' in", "name): return getattr(self.inner_type, name) def __getstate__(self): return { 'inner_type': self.inner_type, 'extended_attributes': self.extended_attributes, }", "which matches the JavaScript 'null' in the enum parsing code. 
inner_values = self.inner_type.enum_values", "return 'FrozenArray<%s>' % self.element_type @property def name(self): return self.element_type.name + 'Array' @property def", "@classmethod def set_dictionaries(cls, new_dictionaries): cls.dictionaries.update(new_dictionaries) @classmethod def set_enums(cls, new_enums): cls.enums.update(new_enums) def resolve_typedefs(self, typedefs):", "for inner_member in member.flattened_member_types: flattened_members[inner_member.name] = inner_member else: flattened_members[member.name] = member return set(flattened_members.values())", "is_annotated_type(self): return True @property def has_string_context(self): return 'StringContext' in self.extended_attributes @property def name(self):", "getattr(self.inner_type, name) def __getstate__(self): return { 'inner_type': self.inner_type, } def __setstate__(self, state): self.inner_type", "+ inner_values return None def resolve_typedefs(self, typedefs): self.inner_type = self.inner_type.resolve_typedefs(typedefs) return self def", "picklable because we store them in interfaces_info. 
\"\"\" from collections import defaultdict ################################################################################", "def is_dictionary(self): return self.base_type in IdlType.dictionaries @property def is_enum(self): # FIXME: add an", "def name(self): \"\"\"Return type name (or inner type name if nullable) http://heycam.github.io/webidl/#dfn-type-name \"\"\"", "if any(key not in EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES for key in extended_attributes): raise ValueError( 'Extended attributes", "'unrestricted %s' % base_type else: self.base_type = base_type def __str__(self): return self.base_type def", "@property def name(self): annotation = ''.join( (key + ('' if val is None", "= frozenset([ 'AllowShared', 'Clamp', 'EnforceRange', 'StringContext', 'TreatNullAs', ]) ################################################################################ # Inheritance ################################################################################ ancestors", "typedefs): self.key_type = self.key_type.resolve_typedefs(typedefs) self.value_type = self.value_type.resolve_typedefs(typedefs) return self @property def is_record_type(self): return", "(member_type.is_string_type or member_type.is_enum) ) @property def numeric_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.is_numeric_type) @property", "for member_type in self.member_types: for idl_type in member_type.idl_types(): yield idl_type ################################################################################ # IdlArrayOrSequenceType,", "@classmethod def set_enums(cls, new_enums): cls.enums.update(new_enums) def resolve_typedefs(self, typedefs): base_type = self.base_type if base_type", "is_frozen_array(self): return False @property def enum_values(self): return self.element_type.enum_values @property def enum_type(self): return self.element_type.enum_type", "base_type, is_unrestricted=False): super(IdlType, self).__init__() if is_unrestricted: self.base_type = 
'unrestricted %s' % base_type else:", "None else '=' + val)) for key, val in self.extended_attributes.items()) return '[%s] %s'", "]) NUMERIC_TYPES = ( INTEGER_TYPES | frozenset([ # http://www.w3.org/TR/WebIDL/#dfn-numeric-type 'float', 'unrestricted float', 'double',", "def is_custom_callback_function(self): entry = IdlType.callback_functions.get(self.base_type) callback_function = entry.get('callback_function') if not callback_function: return False", "in self.member_types) + ')' def __hash__(self): return hash(self.name) def __eq__(self, rhs): return self.name", "raise NotImplementedError('__str__() should be defined in subclasses') def __getattr__(self, name): # Default undefined", "ValueError( 'StringContext is only applicable to string types.') def __str__(self): annotation = ',", "# Use of this source code is governed by a BSD-style license that", "self.base_type return TYPE_NAMES.get(base_type, base_type) @classmethod def set_callback_functions(cls, new_callback_functions): cls.callback_functions.update(new_callback_functions) @classmethod def set_callback_interfaces(cls, new_callback_interfaces):", "to support types like short?[] vs. short[]?, instead of treating these # as", "name http://heycam.github.io/webidl/#dfn-type-name \"\"\" base_type = self.base_type return TYPE_NAMES.get(base_type, base_type) @classmethod def set_callback_functions(cls, new_callback_functions):", "+ 'Record' ################################################################################ # IdlNullableType ################################################################################ # https://heycam.github.io/webidl/#idl-nullable-type class IdlNullableType(IdlTypeBase): def __init__(self, inner_type):", "vs. short[]?, instead of treating these # as orthogonal properties (via flags). 
callback_functions", "http://www.w3.org/TR/WebIDL/#dfn-primitive-type PRIMITIVE_TYPES = (frozenset(['boolean']) | NUMERIC_TYPES) BASIC_TYPES = ( PRIMITIVE_TYPES | frozenset([ #", "@property def enum_values(self): return IdlType.enums.get(self.name) @property def enum_type(self): return self.name if self.is_enum else", "in self.inner_type.idl_types(): yield idl_type ################################################################################ # IdlAnnotatedType ################################################################################ class IdlAnnotatedType(IdlTypeBase): \"\"\"IdlAnnoatedType represents an", "self).__init__(element_type) def __str__(self): return 'FrozenArray<%s>' % self.element_type @property def name(self): return self.element_type.name +", "inner_type def __str__(self): # FIXME: Dictionary::ConversionContext::setConversionType can't # handle the '?' in nullable", "of treating these # as orthogonal properties (via flags). callback_functions = {} callback_interfaces", "boolean_member_type(self): return self.single_matching_member_type( lambda member_type: member_type.base_type == 'boolean') @property def sequence_member_type(self): return self.single_matching_member_type(", "use a set directly because each member is an # IdlTypeBase-derived class, and", "self ################################################################################ # IdlUnionType ################################################################################ class IdlUnionType(IdlTypeBase): # http://heycam.github.io/webidl/#idl-union # IdlUnionType has __hash__()", "if val is None else '=' + val)) for key, val in self.extended_attributes.items())", "__getattr__(self, name): return getattr(self.inner_type, name) def __getstate__(self): return { 'inner_type': self.inner_type, } def", "yield self for idl_type in self.element_type.idl_types(): yield idl_type class IdlSequenceType(IdlArrayOrSequenceType): def __init__(self, element_type):", "self.name in IdlType.enums 
@property def enum_values(self): return IdlType.enums.get(self.name) @property def enum_type(self): return self.name", "IdlUnionType has __hash__() and __eq__() methods because they are stored # in sets.", "def __init__(self, element_type): super(IdlSequenceType, self).__init__(element_type) def __str__(self): return 'sequence<%s>' % self.element_type @property def", "this like \"IdlArrayTypeBase\" or something. class IdlArrayOrSequenceType(IdlTypeBase): \"\"\"Base class for array-like types.\"\"\" def" ]
[ "def data(self): config = { 'name':'Row', 'icon': 'fa fa-square-o', 'tags': 'row, bootstrap', 'preview':", "Config: def data(self): config = { 'name':'Row', 'icon': 'fa fa-square-o', 'tags': 'row, bootstrap',", "<filename>widget_row/config.py class Config: def data(self): config = { 'name':'Row', 'icon': 'fa fa-square-o', 'tags':", "class Config: def data(self): config = { 'name':'Row', 'icon': 'fa fa-square-o', 'tags': 'row,", "data(self): config = { 'name':'Row', 'icon': 'fa fa-square-o', 'tags': 'row, bootstrap', 'preview': False", "{ 'name':'Row', 'icon': 'fa fa-square-o', 'tags': 'row, bootstrap', 'preview': False } return config", "= { 'name':'Row', 'icon': 'fa fa-square-o', 'tags': 'row, bootstrap', 'preview': False } return", "config = { 'name':'Row', 'icon': 'fa fa-square-o', 'tags': 'row, bootstrap', 'preview': False }" ]
[ "the handler objects(essentially the celery objects) on which the # AlgorithmExecutor will request", "pydantic import BaseModel from typing import Dict, List from mipengine.controller.api.algorithm_request_dto import AlgorithmRequestDTO from", "request tasks execution class NodesTasksHandlersDTO(BaseModel): global_node_tasks_handler: INodeTasksHandler local_nodes_tasks_handlers: List[INodeTasksHandler] class Config: arbitrary_types_allowed =", "which the # AlgorithmExecutor will request tasks execution class NodesTasksHandlersDTO(BaseModel): global_node_tasks_handler: INodeTasksHandler local_nodes_tasks_handlers:", "True # The second expected data object for the AlgorithmExecutor layer. # It", "objects) on which the # AlgorithmExecutor will request tasks execution class NodesTasksHandlersDTO(BaseModel): global_node_tasks_handler:", "expected data object for the AlgorithmExecutor layer. # It contains the handler objects(essentially", "the AlgorithmExecutor layer. # It contains the handler objects(essentially the celery objects) on", "# It contains the handler objects(essentially the celery objects) on which the #", "handler objects(essentially the celery objects) on which the # AlgorithmExecutor will request tasks", "data object for the AlgorithmExecutor layer. class AlgorithmExecutionDTO(BaseModel): context_id: str algorithm_name: str algorithm_request_dto:", "import INodeTasksHandler # One of the two expected data object for the AlgorithmExecutor", "AlgorithmRequestDTO from mipengine.controller.node_tasks_handler_interface import INodeTasksHandler # One of the two expected data object", "import BaseModel from typing import Dict, List from mipengine.controller.api.algorithm_request_dto import AlgorithmRequestDTO from mipengine.controller.node_tasks_handler_interface", "class Config: arbitrary_types_allowed = True # The second expected data object for the", "object for the AlgorithmExecutor layer. 
class AlgorithmExecutionDTO(BaseModel): context_id: str algorithm_name: str algorithm_request_dto: AlgorithmRequestDTO", "algorithm_name: str algorithm_request_dto: AlgorithmRequestDTO class Config: arbitrary_types_allowed = True # The second expected", "expected data object for the AlgorithmExecutor layer. class AlgorithmExecutionDTO(BaseModel): context_id: str algorithm_name: str", "# AlgorithmExecutor will request tasks execution class NodesTasksHandlersDTO(BaseModel): global_node_tasks_handler: INodeTasksHandler local_nodes_tasks_handlers: List[INodeTasksHandler] class", "AlgorithmExecutor will request tasks execution class NodesTasksHandlersDTO(BaseModel): global_node_tasks_handler: INodeTasksHandler local_nodes_tasks_handlers: List[INodeTasksHandler] class Config:", "# The second expected data object for the AlgorithmExecutor layer. # It contains", "import Dict, List from mipengine.controller.api.algorithm_request_dto import AlgorithmRequestDTO from mipengine.controller.node_tasks_handler_interface import INodeTasksHandler # One", "from pydantic import BaseModel from typing import Dict, List from mipengine.controller.api.algorithm_request_dto import AlgorithmRequestDTO", "the # AlgorithmExecutor will request tasks execution class NodesTasksHandlersDTO(BaseModel): global_node_tasks_handler: INodeTasksHandler local_nodes_tasks_handlers: List[INodeTasksHandler]", "mipengine.controller.api.algorithm_request_dto import AlgorithmRequestDTO from mipengine.controller.node_tasks_handler_interface import INodeTasksHandler # One of the two expected", "Config: arbitrary_types_allowed = True # The second expected data object for the AlgorithmExecutor", "will request tasks execution class NodesTasksHandlersDTO(BaseModel): global_node_tasks_handler: INodeTasksHandler local_nodes_tasks_handlers: List[INodeTasksHandler] class Config: arbitrary_types_allowed", "= True # The second expected data object for the AlgorithmExecutor layer. 
#", "The second expected data object for the AlgorithmExecutor layer. # It contains the", "layer. class AlgorithmExecutionDTO(BaseModel): context_id: str algorithm_name: str algorithm_request_dto: AlgorithmRequestDTO class Config: arbitrary_types_allowed =", "mipengine.controller.node_tasks_handler_interface import INodeTasksHandler # One of the two expected data object for the", "str algorithm_name: str algorithm_request_dto: AlgorithmRequestDTO class Config: arbitrary_types_allowed = True # The second", "AlgorithmExecutionDTO(BaseModel): context_id: str algorithm_name: str algorithm_request_dto: AlgorithmRequestDTO class Config: arbitrary_types_allowed = True #", "AlgorithmRequestDTO class Config: arbitrary_types_allowed = True # The second expected data object for", "from mipengine.controller.node_tasks_handler_interface import INodeTasksHandler # One of the two expected data object for", "One of the two expected data object for the AlgorithmExecutor layer. class AlgorithmExecutionDTO(BaseModel):", "INodeTasksHandler # One of the two expected data object for the AlgorithmExecutor layer.", "It contains the handler objects(essentially the celery objects) on which the # AlgorithmExecutor", "arbitrary_types_allowed = True # The second expected data object for the AlgorithmExecutor layer.", "from typing import Dict, List from mipengine.controller.api.algorithm_request_dto import AlgorithmRequestDTO from mipengine.controller.node_tasks_handler_interface import INodeTasksHandler", "tasks execution class NodesTasksHandlersDTO(BaseModel): global_node_tasks_handler: INodeTasksHandler local_nodes_tasks_handlers: List[INodeTasksHandler] class Config: arbitrary_types_allowed = True", "BaseModel from typing import Dict, List from mipengine.controller.api.algorithm_request_dto import AlgorithmRequestDTO from mipengine.controller.node_tasks_handler_interface import", "two expected data object for the AlgorithmExecutor layer. 
class AlgorithmExecutionDTO(BaseModel): context_id: str algorithm_name:", "context_id: str algorithm_name: str algorithm_request_dto: AlgorithmRequestDTO class Config: arbitrary_types_allowed = True # The", "str algorithm_request_dto: AlgorithmRequestDTO class Config: arbitrary_types_allowed = True # The second expected data", "AlgorithmExecutor layer. class AlgorithmExecutionDTO(BaseModel): context_id: str algorithm_name: str algorithm_request_dto: AlgorithmRequestDTO class Config: arbitrary_types_allowed", "# One of the two expected data object for the AlgorithmExecutor layer. class", "typing import Dict, List from mipengine.controller.api.algorithm_request_dto import AlgorithmRequestDTO from mipengine.controller.node_tasks_handler_interface import INodeTasksHandler #", "for the AlgorithmExecutor layer. # It contains the handler objects(essentially the celery objects)", "on which the # AlgorithmExecutor will request tasks execution class NodesTasksHandlersDTO(BaseModel): global_node_tasks_handler: INodeTasksHandler", "layer. # It contains the handler objects(essentially the celery objects) on which the", "celery objects) on which the # AlgorithmExecutor will request tasks execution class NodesTasksHandlersDTO(BaseModel):", "the celery objects) on which the # AlgorithmExecutor will request tasks execution class", "the two expected data object for the AlgorithmExecutor layer. class AlgorithmExecutionDTO(BaseModel): context_id: str", "second expected data object for the AlgorithmExecutor layer. # It contains the handler", "AlgorithmExecutor layer. # It contains the handler objects(essentially the celery objects) on which", "of the two expected data object for the AlgorithmExecutor layer. 
class AlgorithmExecutionDTO(BaseModel): context_id:", "Dict, List from mipengine.controller.api.algorithm_request_dto import AlgorithmRequestDTO from mipengine.controller.node_tasks_handler_interface import INodeTasksHandler # One of", "class AlgorithmExecutionDTO(BaseModel): context_id: str algorithm_name: str algorithm_request_dto: AlgorithmRequestDTO class Config: arbitrary_types_allowed = True", "algorithm_request_dto: AlgorithmRequestDTO class Config: arbitrary_types_allowed = True # The second expected data object", "from mipengine.controller.api.algorithm_request_dto import AlgorithmRequestDTO from mipengine.controller.node_tasks_handler_interface import INodeTasksHandler # One of the two", "List from mipengine.controller.api.algorithm_request_dto import AlgorithmRequestDTO from mipengine.controller.node_tasks_handler_interface import INodeTasksHandler # One of the", "the AlgorithmExecutor layer. class AlgorithmExecutionDTO(BaseModel): context_id: str algorithm_name: str algorithm_request_dto: AlgorithmRequestDTO class Config:", "objects(essentially the celery objects) on which the # AlgorithmExecutor will request tasks execution", "import AlgorithmRequestDTO from mipengine.controller.node_tasks_handler_interface import INodeTasksHandler # One of the two expected data", "object for the AlgorithmExecutor layer. # It contains the handler objects(essentially the celery", "contains the handler objects(essentially the celery objects) on which the # AlgorithmExecutor will", "for the AlgorithmExecutor layer. class AlgorithmExecutionDTO(BaseModel): context_id: str algorithm_name: str algorithm_request_dto: AlgorithmRequestDTO class", "data object for the AlgorithmExecutor layer. # It contains the handler objects(essentially the" ]
[ "employees workhour list - file name (with .xlsx extention): \") # loading the", ".xlsx extention): \") # loading the source excel file to get data from", "input_handeling.handeling_work_hour_days( work_hour_start_days, days_in_month) break else: cnt += 1 print(const.incorrect_values_msg) except ValueError: cnt +=", "list - file name (with .xlsx extention): \") # loading the source excel", "float(input('Break length: ')) year = int(input('Year: ')) month = int(input('Month (1 - 12):", "\".\\\\base.xlsx\" files_handeling.check_required_files_existence( base_file_path, source_file_name) base_file = xl.load_workbook(base_file_path) base_ws = base_file.active # check output", "loading the source excel file to get data from source_file_path = f\".\\{source_file_name}\" files_handeling.check_required_files_existence(", "data to base_file_path = \".\\\\base.xlsx\" files_handeling.check_required_files_existence( base_file_path, source_file_name) base_file = xl.load_workbook(base_file_path) base_ws =", "int(input('Month (1 - 12): ')) # get days in month days_in_month = calendar.monthrange(year,", "# loading style base_file.add_named_style(row_style.base) base_file.add_named_style(row_style.days_off) # table creation work_list.fill_worksheet(source_ws, base_ws, month_name, base_file, month,", "source_file_path = f\".\\{source_file_name}\" files_handeling.check_required_files_existence( source_file_path, source_file_name) source_ws = xl.load_workbook(source_file_path).worksheets[0] # get data from", "= int(input('Year: ')) month = int(input('Month (1 - 12): ')) # get days", "source_ws['D3'].value # loading the base excel file to save data to base_file_path =", "work hour start in month dict work_hour_start_days = {} # welcomme message print(const.welcome_msg)", "start in month dict work_hour_start_days = {} # welcomme message print(const.welcome_msg) # count", "# check output folder existance output_folder_path = f\".\\{construction_site} employees worklists\" 
files_handeling.check_required_files_existence(output_folder_path, construction_site=construction_site) #", "files_handeling import constants as const import input_handeling def main(): # take source file", "- 12): ')) # get days in month days_in_month = calendar.monthrange(year, month)[1] if", "(1 - 12): ')) # get days in month days_in_month = calendar.monthrange(year, month)[1]", "{} # welcomme message print(const.welcome_msg) # count for help msg to be shown", "source_file_name) source_ws = xl.load_workbook(source_file_path).worksheets[0] # get data from source file month_name = source_ws['R1'].value", "# help msg if problems input_handeling.show_help(cnt) pause = float(input('Break length: ')) year =", "input_handeling def main(): # take source file name source_file_name = input( \"\\nSource -", "import work_list import row_style import os import files_handeling import constants as const import", "import os import files_handeling import constants as const import input_handeling def main(): #", "\"\\nSource - employees workhour list - file name (with .xlsx extention): \") #", "to base_file_path = \".\\\\base.xlsx\" files_handeling.check_required_files_existence( base_file_path, source_file_name) base_file = xl.load_workbook(base_file_path) base_ws = base_file.active", "cnt = 0 # get user's input while True: try: # help msg", "work_hour_start_days = {} # welcomme message print(const.welcome_msg) # count for help msg to", "')) # get days in month days_in_month = calendar.monthrange(year, month)[1] if input_handeling.check_input(pause, year,", "construction_site = source_ws['D3'].value # loading the base excel file to save data to", "base_file_path = \".\\\\base.xlsx\" files_handeling.check_required_files_existence( base_file_path, source_file_name) base_file = xl.load_workbook(base_file_path) base_ws = base_file.active #", "openpyxl as xl import calendar import work_list import row_style import os import files_handeling", "days_in_month) break else: cnt += 1 
print(const.incorrect_values_msg) except ValueError: cnt += 1 print(const.incorrect_values_msg)", "input_handeling.show_help(cnt) pause = float(input('Break length: ')) year = int(input('Year: ')) month = int(input('Month", "= \".\\\\base.xlsx\" files_handeling.check_required_files_existence( base_file_path, source_file_name) base_file = xl.load_workbook(base_file_path) base_ws = base_file.active # check", "xl import calendar import work_list import row_style import os import files_handeling import constants", "print(const.incorrect_values_msg) except ValueError: cnt += 1 print(const.incorrect_values_msg) # create new directory after chcecking", "= source_ws['R1'].value construction_site = source_ws['D3'].value # loading the base excel file to save", "style base_file.add_named_style(row_style.base) base_file.add_named_style(row_style.days_off) # table creation work_list.fill_worksheet(source_ws, base_ws, month_name, base_file, month, year, work_hour_start_days,", "file to get data from source_file_path = f\".\\{source_file_name}\" files_handeling.check_required_files_existence( source_file_path, source_file_name) source_ws =", "constants as const import input_handeling def main(): # take source file name source_file_name", "# loading the source excel file to get data from source_file_path = f\".\\{source_file_name}\"", "# create new directory after chcecking all conditions os.mkdir(f\"{output_folder_path}\") # loading style base_file.add_named_style(row_style.base)", "in month days_in_month = calendar.monthrange(year, month)[1] if input_handeling.check_input(pause, year, month): print( f\"Chosen month", "try: # help msg if problems input_handeling.show_help(cnt) pause = float(input('Break length: ')) year", "shown cnt = 0 # get user's input while True: try: # help", "get user's input while True: try: # help msg if problems input_handeling.show_help(cnt) pause", "else: cnt += 1 print(const.incorrect_values_msg) except ValueError: cnt += 1 
print(const.incorrect_values_msg) # create", "conditions os.mkdir(f\"{output_folder_path}\") # loading style base_file.add_named_style(row_style.base) base_file.add_named_style(row_style.days_off) # table creation work_list.fill_worksheet(source_ws, base_ws, month_name,", "break else: cnt += 1 print(const.incorrect_values_msg) except ValueError: cnt += 1 print(const.incorrect_values_msg) #", "# count for help msg to be shown cnt = 0 # get", "count for help msg to be shown cnt = 0 # get user's", "# input handeling input_handeling.handeling_work_hour_days( work_hour_start_days, days_in_month) break else: cnt += 1 print(const.incorrect_values_msg) except", "existance output_folder_path = f\".\\{construction_site} employees worklists\" files_handeling.check_required_files_existence(output_folder_path, construction_site=construction_site) # different work hour start", "= {} # welcomme message print(const.welcome_msg) # count for help msg to be", "12): ')) # get days in month days_in_month = calendar.monthrange(year, month)[1] if input_handeling.check_input(pause,", "chcecking all conditions os.mkdir(f\"{output_folder_path}\") # loading style base_file.add_named_style(row_style.base) base_file.add_named_style(row_style.days_off) # table creation work_list.fill_worksheet(source_ws,", "# loading the base excel file to save data to base_file_path = \".\\\\base.xlsx\"", "month_name = source_ws['R1'].value construction_site = source_ws['D3'].value # loading the base excel file to", "year, month): print( f\"Chosen month has {days_in_month} days.\") # input handeling input_handeling.handeling_work_hour_days( work_hour_start_days,", "for help msg to be shown cnt = 0 # get user's input", "month)[1] if input_handeling.check_input(pause, year, month): print( f\"Chosen month has {days_in_month} days.\") # input", "= xl.load_workbook(base_file_path) base_ws = base_file.active # check output folder existance output_folder_path = f\".\\{construction_site}", "# get days in month 
days_in_month = calendar.monthrange(year, month)[1] if input_handeling.check_input(pause, year, month):", "= f\".\\{construction_site} employees worklists\" files_handeling.check_required_files_existence(output_folder_path, construction_site=construction_site) # different work hour start in month", "loading the base excel file to save data to base_file_path = \".\\\\base.xlsx\" files_handeling.check_required_files_existence(", "# get user's input while True: try: # help msg if problems input_handeling.show_help(cnt)", "except ValueError: cnt += 1 print(const.incorrect_values_msg) # create new directory after chcecking all", "handeling input_handeling.handeling_work_hour_days( work_hour_start_days, days_in_month) break else: cnt += 1 print(const.incorrect_values_msg) except ValueError: cnt", "import calendar import work_list import row_style import os import files_handeling import constants as", "if problems input_handeling.show_help(cnt) pause = float(input('Break length: ')) year = int(input('Year: ')) month", "= int(input('Month (1 - 12): ')) # get days in month days_in_month =", "problems input_handeling.show_help(cnt) pause = float(input('Break length: ')) year = int(input('Year: ')) month =", "import files_handeling import constants as const import input_handeling def main(): # take source", "os import files_handeling import constants as const import input_handeling def main(): # take", "msg to be shown cnt = 0 # get user's input while True:", "print(const.incorrect_values_msg) # create new directory after chcecking all conditions os.mkdir(f\"{output_folder_path}\") # loading style", "be shown cnt = 0 # get user's input while True: try: #", "source file name source_file_name = input( \"\\nSource - employees workhour list - file", "after chcecking all conditions os.mkdir(f\"{output_folder_path}\") # loading style base_file.add_named_style(row_style.base) base_file.add_named_style(row_style.days_off) # table creation", "os.mkdir(f\"{output_folder_path}\") # 
loading style base_file.add_named_style(row_style.base) base_file.add_named_style(row_style.days_off) # table creation work_list.fill_worksheet(source_ws, base_ws, month_name, base_file,", "base_file.active # check output folder existance output_folder_path = f\".\\{construction_site} employees worklists\" files_handeling.check_required_files_existence(output_folder_path, construction_site=construction_site)", "while True: try: # help msg if problems input_handeling.show_help(cnt) pause = float(input('Break length:", "different work hour start in month dict work_hour_start_days = {} # welcomme message", "print( f\"Chosen month has {days_in_month} days.\") # input handeling input_handeling.handeling_work_hour_days( work_hour_start_days, days_in_month) break", "work_list import row_style import os import files_handeling import constants as const import input_handeling", "loading style base_file.add_named_style(row_style.base) base_file.add_named_style(row_style.days_off) # table creation work_list.fill_worksheet(source_ws, base_ws, month_name, base_file, month, year,", "file name (with .xlsx extention): \") # loading the source excel file to", "xl.load_workbook(source_file_path).worksheets[0] # get data from source file month_name = source_ws['R1'].value construction_site = source_ws['D3'].value", "to be shown cnt = 0 # get user's input while True: try:", "input handeling input_handeling.handeling_work_hour_days( work_hour_start_days, days_in_month) break else: cnt += 1 print(const.incorrect_values_msg) except ValueError:", "import row_style import os import files_handeling import constants as const import input_handeling def", "data from source_file_path = f\".\\{source_file_name}\" files_handeling.check_required_files_existence( source_file_path, source_file_name) source_ws = xl.load_workbook(source_file_path).worksheets[0] # get", "= f\".\\{source_file_name}\" files_handeling.check_required_files_existence( source_file_path, source_file_name) source_ws = 
xl.load_workbook(source_file_path).worksheets[0] # get data from source", "= float(input('Break length: ')) year = int(input('Year: ')) month = int(input('Month (1 -", "f\".\\{construction_site} employees worklists\" files_handeling.check_required_files_existence(output_folder_path, construction_site=construction_site) # different work hour start in month dict", "# get data from source file month_name = source_ws['R1'].value construction_site = source_ws['D3'].value #", "')) year = int(input('Year: ')) month = int(input('Month (1 - 12): ')) #", "in month dict work_hour_start_days = {} # welcomme message print(const.welcome_msg) # count for", "input_handeling.check_input(pause, year, month): print( f\"Chosen month has {days_in_month} days.\") # input handeling input_handeling.handeling_work_hour_days(", "days in month days_in_month = calendar.monthrange(year, month)[1] if input_handeling.check_input(pause, year, month): print( f\"Chosen", "creation work_list.fill_worksheet(source_ws, base_ws, month_name, base_file, month, year, work_hour_start_days, pause, construction_site) files_handeling.exit() if __name__", "- file name (with .xlsx extention): \") # loading the source excel file", "- employees workhour list - file name (with .xlsx extention): \") # loading", "month days_in_month = calendar.monthrange(year, month)[1] if input_handeling.check_input(pause, year, month): print( f\"Chosen month has", "')) month = int(input('Month (1 - 12): ')) # get days in month", "the base excel file to save data to base_file_path = \".\\\\base.xlsx\" files_handeling.check_required_files_existence( base_file_path,", "dict work_hour_start_days = {} # welcomme message print(const.welcome_msg) # count for help msg", "take source file name source_file_name = input( \"\\nSource - employees workhour list -", "files_handeling.check_required_files_existence( source_file_path, source_file_name) source_ws = xl.load_workbook(source_file_path).worksheets[0] # get data from source file 
month_name", "year = int(input('Year: ')) month = int(input('Month (1 - 12): ')) # get", "month has {days_in_month} days.\") # input handeling input_handeling.handeling_work_hour_days( work_hour_start_days, days_in_month) break else: cnt", "row_style import os import files_handeling import constants as const import input_handeling def main():", "extention): \") # loading the source excel file to get data from source_file_path", "source_ws['R1'].value construction_site = source_ws['D3'].value # loading the base excel file to save data", "output_folder_path = f\".\\{construction_site} employees worklists\" files_handeling.check_required_files_existence(output_folder_path, construction_site=construction_site) # different work hour start in", "const import input_handeling def main(): # take source file name source_file_name = input(", "month_name, base_file, month, year, work_hour_start_days, pause, construction_site) files_handeling.exit() if __name__ == \"__main__\": main()", "int(input('Year: ')) month = int(input('Month (1 - 12): ')) # get days in", "+= 1 print(const.incorrect_values_msg) # create new directory after chcecking all conditions os.mkdir(f\"{output_folder_path}\") #", "= calendar.monthrange(year, month)[1] if input_handeling.check_input(pause, year, month): print( f\"Chosen month has {days_in_month} days.\")", "worklists\" files_handeling.check_required_files_existence(output_folder_path, construction_site=construction_site) # different work hour start in month dict work_hour_start_days =", "(with .xlsx extention): \") # loading the source excel file to get data", "user's input while True: try: # help msg if problems input_handeling.show_help(cnt) pause =", "month = int(input('Month (1 - 12): ')) # get days in month days_in_month", "month dict work_hour_start_days = {} # welcomme message print(const.welcome_msg) # count for help", "excel file to save data to base_file_path = \".\\\\base.xlsx\" files_handeling.check_required_files_existence( 
base_file_path, source_file_name) base_file", "\") # loading the source excel file to get data from source_file_path =", "base_file = xl.load_workbook(base_file_path) base_ws = base_file.active # check output folder existance output_folder_path =", "import constants as const import input_handeling def main(): # take source file name", "file name source_file_name = input( \"\\nSource - employees workhour list - file name", "source excel file to get data from source_file_path = f\".\\{source_file_name}\" files_handeling.check_required_files_existence( source_file_path, source_file_name)", "True: try: # help msg if problems input_handeling.show_help(cnt) pause = float(input('Break length: '))", "+= 1 print(const.incorrect_values_msg) except ValueError: cnt += 1 print(const.incorrect_values_msg) # create new directory", "source_file_name = input( \"\\nSource - employees workhour list - file name (with .xlsx", "0 # get user's input while True: try: # help msg if problems", "new directory after chcecking all conditions os.mkdir(f\"{output_folder_path}\") # loading style base_file.add_named_style(row_style.base) base_file.add_named_style(row_style.days_off) #", "help msg to be shown cnt = 0 # get user's input while", "= input( \"\\nSource - employees workhour list - file name (with .xlsx extention):", "help msg if problems input_handeling.show_help(cnt) pause = float(input('Break length: ')) year = int(input('Year:", "if input_handeling.check_input(pause, year, month): print( f\"Chosen month has {days_in_month} days.\") # input handeling", "calendar.monthrange(year, month)[1] if input_handeling.check_input(pause, year, month): print( f\"Chosen month has {days_in_month} days.\") #", "import openpyxl as xl import calendar import work_list import row_style import os import", "hour start in month dict work_hour_start_days = {} # welcomme message print(const.welcome_msg) #", "length: ')) year = int(input('Year: ')) month = int(input('Month (1 - 12): '))", "{days_in_month} 
days.\") # input handeling input_handeling.handeling_work_hour_days( work_hour_start_days, days_in_month) break else: cnt += 1", "data from source file month_name = source_ws['R1'].value construction_site = source_ws['D3'].value # loading the", "base_ws, month_name, base_file, month, year, work_hour_start_days, pause, construction_site) files_handeling.exit() if __name__ == \"__main__\":", "import input_handeling def main(): # take source file name source_file_name = input( \"\\nSource", "f\".\\{source_file_name}\" files_handeling.check_required_files_existence( source_file_path, source_file_name) source_ws = xl.load_workbook(source_file_path).worksheets[0] # get data from source file", "excel file to get data from source_file_path = f\".\\{source_file_name}\" files_handeling.check_required_files_existence( source_file_path, source_file_name) source_ws", "1 print(const.incorrect_values_msg) except ValueError: cnt += 1 print(const.incorrect_values_msg) # create new directory after", "msg if problems input_handeling.show_help(cnt) pause = float(input('Break length: ')) year = int(input('Year: '))", "to save data to base_file_path = \".\\\\base.xlsx\" files_handeling.check_required_files_existence( base_file_path, source_file_name) base_file = xl.load_workbook(base_file_path)", "get data from source file month_name = source_ws['R1'].value construction_site = source_ws['D3'].value # loading", "# table creation work_list.fill_worksheet(source_ws, base_ws, month_name, base_file, month, year, work_hour_start_days, pause, construction_site) files_handeling.exit()", "files_handeling.check_required_files_existence(output_folder_path, construction_site=construction_site) # different work hour start in month dict work_hour_start_days = {}", "base excel file to save data to base_file_path = \".\\\\base.xlsx\" files_handeling.check_required_files_existence( base_file_path, source_file_name)", "output folder existance output_folder_path = f\".\\{construction_site} employees 
worklists\" files_handeling.check_required_files_existence(output_folder_path, construction_site=construction_site) # different work", "from source_file_path = f\".\\{source_file_name}\" files_handeling.check_required_files_existence( source_file_path, source_file_name) source_ws = xl.load_workbook(source_file_path).worksheets[0] # get data", "from source file month_name = source_ws['R1'].value construction_site = source_ws['D3'].value # loading the base", "files_handeling.check_required_files_existence( base_file_path, source_file_name) base_file = xl.load_workbook(base_file_path) base_ws = base_file.active # check output folder", "<gh_stars>1-10 import openpyxl as xl import calendar import work_list import row_style import os", "check output folder existance output_folder_path = f\".\\{construction_site} employees worklists\" files_handeling.check_required_files_existence(output_folder_path, construction_site=construction_site) # different", "message print(const.welcome_msg) # count for help msg to be shown cnt = 0", "= 0 # get user's input while True: try: # help msg if", "folder existance output_folder_path = f\".\\{construction_site} employees worklists\" files_handeling.check_required_files_existence(output_folder_path, construction_site=construction_site) # different work hour", "month): print( f\"Chosen month has {days_in_month} days.\") # input handeling input_handeling.handeling_work_hour_days( work_hour_start_days, days_in_month)", "work_list.fill_worksheet(source_ws, base_ws, month_name, base_file, month, year, work_hour_start_days, pause, construction_site) files_handeling.exit() if __name__ ==", "days.\") # input handeling input_handeling.handeling_work_hour_days( work_hour_start_days, days_in_month) break else: cnt += 1 print(const.incorrect_values_msg)", "save data to base_file_path = \".\\\\base.xlsx\" files_handeling.check_required_files_existence( base_file_path, source_file_name) base_file = xl.load_workbook(base_file_path) base_ws", "# welcomme 
message print(const.welcome_msg) # count for help msg to be shown cnt", "has {days_in_month} days.\") # input handeling input_handeling.handeling_work_hour_days( work_hour_start_days, days_in_month) break else: cnt +=", "# different work hour start in month dict work_hour_start_days = {} # welcomme", "get data from source_file_path = f\".\\{source_file_name}\" files_handeling.check_required_files_existence( source_file_path, source_file_name) source_ws = xl.load_workbook(source_file_path).worksheets[0] #", "xl.load_workbook(base_file_path) base_ws = base_file.active # check output folder existance output_folder_path = f\".\\{construction_site} employees", "employees worklists\" files_handeling.check_required_files_existence(output_folder_path, construction_site=construction_site) # different work hour start in month dict work_hour_start_days", "cnt += 1 print(const.incorrect_values_msg) except ValueError: cnt += 1 print(const.incorrect_values_msg) # create new", "source_file_name) base_file = xl.load_workbook(base_file_path) base_ws = base_file.active # check output folder existance output_folder_path", "name source_file_name = input( \"\\nSource - employees workhour list - file name (with", "base_ws = base_file.active # check output folder existance output_folder_path = f\".\\{construction_site} employees worklists\"", "welcomme message print(const.welcome_msg) # count for help msg to be shown cnt =", "get days in month days_in_month = calendar.monthrange(year, month)[1] if input_handeling.check_input(pause, year, month): print(", "days_in_month = calendar.monthrange(year, month)[1] if input_handeling.check_input(pause, year, month): print( f\"Chosen month has {days_in_month}", "# take source file name source_file_name = input( \"\\nSource - employees workhour list", "source_file_path, source_file_name) source_ws = xl.load_workbook(source_file_path).worksheets[0] # get data from source file month_name =", "create new directory after chcecking all conditions 
os.mkdir(f\"{output_folder_path}\") # loading style base_file.add_named_style(row_style.base) base_file.add_named_style(row_style.days_off)", "source file month_name = source_ws['R1'].value construction_site = source_ws['D3'].value # loading the base excel", "as const import input_handeling def main(): # take source file name source_file_name =", "file to save data to base_file_path = \".\\\\base.xlsx\" files_handeling.check_required_files_existence( base_file_path, source_file_name) base_file =", "base_file.add_named_style(row_style.days_off) # table creation work_list.fill_worksheet(source_ws, base_ws, month_name, base_file, month, year, work_hour_start_days, pause, construction_site)", "file month_name = source_ws['R1'].value construction_site = source_ws['D3'].value # loading the base excel file", "cnt += 1 print(const.incorrect_values_msg) # create new directory after chcecking all conditions os.mkdir(f\"{output_folder_path}\")", "= xl.load_workbook(source_file_path).worksheets[0] # get data from source file month_name = source_ws['R1'].value construction_site =", "= base_file.active # check output folder existance output_folder_path = f\".\\{construction_site} employees worklists\" files_handeling.check_required_files_existence(output_folder_path,", "work_hour_start_days, days_in_month) break else: cnt += 1 print(const.incorrect_values_msg) except ValueError: cnt += 1", "base_file.add_named_style(row_style.base) base_file.add_named_style(row_style.days_off) # table creation work_list.fill_worksheet(source_ws, base_ws, month_name, base_file, month, year, work_hour_start_days, pause,", "= source_ws['D3'].value # loading the base excel file to save data to base_file_path", "def main(): # take source file name source_file_name = input( \"\\nSource - employees", "pause = float(input('Break length: ')) year = int(input('Year: ')) month = int(input('Month (1", "base_file_path, source_file_name) base_file = xl.load_workbook(base_file_path) base_ws = base_file.active 
# check output folder existance", "print(const.welcome_msg) # count for help msg to be shown cnt = 0 #", "1 print(const.incorrect_values_msg) # create new directory after chcecking all conditions os.mkdir(f\"{output_folder_path}\") # loading", "as xl import calendar import work_list import row_style import os import files_handeling import", "input( \"\\nSource - employees workhour list - file name (with .xlsx extention): \")", "main(): # take source file name source_file_name = input( \"\\nSource - employees workhour", "calendar import work_list import row_style import os import files_handeling import constants as const", "directory after chcecking all conditions os.mkdir(f\"{output_folder_path}\") # loading style base_file.add_named_style(row_style.base) base_file.add_named_style(row_style.days_off) # table", "construction_site=construction_site) # different work hour start in month dict work_hour_start_days = {} #", "table creation work_list.fill_worksheet(source_ws, base_ws, month_name, base_file, month, year, work_hour_start_days, pause, construction_site) files_handeling.exit() if", "source_ws = xl.load_workbook(source_file_path).worksheets[0] # get data from source file month_name = source_ws['R1'].value construction_site", "ValueError: cnt += 1 print(const.incorrect_values_msg) # create new directory after chcecking all conditions", "all conditions os.mkdir(f\"{output_folder_path}\") # loading style base_file.add_named_style(row_style.base) base_file.add_named_style(row_style.days_off) # table creation work_list.fill_worksheet(source_ws, base_ws,", "to get data from source_file_path = f\".\\{source_file_name}\" files_handeling.check_required_files_existence( source_file_path, source_file_name) source_ws = xl.load_workbook(source_file_path).worksheets[0]", "the source excel file to get data from source_file_path = f\".\\{source_file_name}\" files_handeling.check_required_files_existence( source_file_path,", "f\"Chosen month has {days_in_month} days.\") # 
input handeling input_handeling.handeling_work_hour_days( work_hour_start_days, days_in_month) break else:", "name (with .xlsx extention): \") # loading the source excel file to get", "workhour list - file name (with .xlsx extention): \") # loading the source", "input while True: try: # help msg if problems input_handeling.show_help(cnt) pause = float(input('Break" ]
[ "'blank': 'True'}), 'password': ('<PASSWORD>.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Permission']\", 'symmetrical':", "[], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013,", "[], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 11, 5, 41, 51,", "'city': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['auth.User']\"}), 'country': ('django.db.models.fields.CharField', [],", "[], {'max_length': '100'}) }, 'mozspaces.photo': { 'Meta': {'object_name': 'Photo'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key':", "from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm):", "{'unique': 'True', 'max_length': '50'}), 'mozspace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'keywords'\", 'to': \"orm['mozspaces.MozSpace']\"}) }, 'mozspaces.mozspace':", "'True', 'related_name': \"'featured_mozspace'\", 'null': 'True', 'to': \"orm['mozspaces.Photo']\"}), 'email': ('django.db.models.fields.EmailField', [], {'default': \"''\", 'max_length':", "('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering':", "11, 5, 41, 51, 842643)'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password':", "'keyword': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}), 'mozspace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'keywords'\", 
'to':", "[], {}), 'lon': ('django.db.models.fields.FloatField', [], {}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'phone': ('django.db.models.fields.CharField',", "model 'MozSpace' db.delete_table('mozspaces_mozspace') # Deleting model 'Keyword' db.delete_table('mozspaces_keyword') # Deleting model 'Photo' db.delete_table('mozspaces_photo')", "{'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField',", "('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'mozspaces.keyword': { 'Meta': {'object_name': 'Keyword'}, 'id': ('django.db.models.fields.AutoField', [],", "}, 'mozspaces.photo': { 'Meta': {'object_name': 'Photo'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mozspace': ('django.db.models.fields.related.ForeignKey',", "('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 11, 5, 41,", "[], {'unique': 'True', 'max_length': '50'}), 'mozspace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'keywords'\", 'to': \"orm['mozspaces.MozSpace']\"}) },", "from south.db import db from south.v2 import SchemaMigration from django.db import models class", "max_length=50)), ('mozspace', self.gf('django.db.models.fields.related.ForeignKey')(related_name='keywords', to=orm['mozspaces.MozSpace'])), )) db.send_create_signal('mozspaces', ['Keyword']) # Adding model 'Photo' db.create_table('mozspaces_photo', (", "'mozspace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'photos'\", 'to': \"orm['mozspaces.MozSpace']\"}), 'photofile': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}) }", "'extra_text': ('django.db.models.fields.TextField', [], {'default': 
\"''\", 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'lat':", "'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Permission']\", 'symmetrical':", "{'to': \"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': \"('content_type__app_label', 'content_type__model',", "'null': 'True', 'to': \"orm['mozspaces.Photo']\"}), 'email': ('django.db.models.fields.EmailField', [], {'default': \"''\", 'max_length': '75', 'blank': 'True'}),", "self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('photofile', self.gf('django.db.models.fields.files.ImageField')(max_length=100)), ('mozspace', self.gf('django.db.models.fields.related.ForeignKey')(related_name='photos', to=orm['mozspaces.MozSpace'])), )) db.send_create_signal('mozspaces', ['Photo']) def backwards(self, orm): #", "Adding model 'Photo' db.create_table('mozspaces_photo', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('photofile', self.gf('django.db.models.fields.files.ImageField')(max_length=100)), ('mozspace', self.gf('django.db.models.fields.related.ForeignKey')(related_name='photos', to=orm['mozspaces.MozSpace'])), ))", "{'to': \"orm['contenttypes.ContentType']\"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) },", "'region': ('django.db.models.fields.CharField', [], {'default': \"''\", 'max_length': '100', 'blank': 'True'}), 'timezone': ('django.db.models.fields.CharField', [], {'max_length':", "41, 51, 842704)'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [],", "'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 
'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff':", "related_name='featured_mozspace', null=True, to=orm['mozspaces.Photo'])), )) db.send_create_signal('mozspaces', ['MozSpace']) # Adding model 'Keyword' db.create_table('mozspaces_keyword', ( ('id',", "= { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name':", "model 'Photo' db.delete_table('mozspaces_photo') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField',", "}, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 11,", "\"('name',)\", 'unique_together': \"(('app_label', 'model'),)\", 'object_name': 'ContentType', 'db_table': \"'django_content_type'\"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),", "# Deleting model 'Photo' db.delete_table('mozspaces_photo') models = { 'auth.group': { 'Meta': {'object_name': 'Group'},", "db.create_table('mozspaces_keyword', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('keyword', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)), ('mozspace', self.gf('django.db.models.fields.related.ForeignKey')(related_name='keywords', to=orm['mozspaces.MozSpace'])), )) db.send_create_signal('mozspaces', ['Keyword'])", "'300'}), 'city': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['auth.User']\"}), 'country': ('django.db.models.fields.CharField',", "('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'keywords'\", 'to': \"orm['mozspaces.MozSpace']\"}) }, 'mozspaces.mozspace': { 'Meta': {'object_name': 'MozSpace'}, 'address':", "{'max_length': '75', 'blank': 'True'}), 'first_name': 
('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField',", "# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration", "'50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2,", "{'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'mozspaces.keyword': { 'Meta': {'object_name': 'Keyword'},", "forwards(self, orm): # Adding model 'MozSpace' db.create_table('mozspaces_mozspace', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('address',", "('phone', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)), ('email', self.gf('django.db.models.fields.EmailField')(default='', max_length=75, blank=True)), ('coordinator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])), ('extra_text', self.gf('django.db.models.fields.TextField')(default='', blank=True)),", "{'primary_key': 'True'}), 'keyword': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}), 'mozspace': ('django.db.models.fields.related.ForeignKey', [], {'related_name':", "'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),", ")) db.send_create_signal('mozspaces', ['MozSpace']) # Adding model 'Keyword' db.create_table('mozspaces_keyword', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('keyword', self.gf('django.db.models.fields.CharField')(unique=True,", "'Photo' db.delete_table('mozspaces_photo') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [],", "import 
models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'MozSpace' db.create_table('mozspaces_mozspace', (", "'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to':", "[], {'max_length': '100'}), 'phone': ('django.db.models.fields.CharField', [], {'default': \"''\", 'max_length': '100', 'blank': 'True'}), 'region':", "# Deleting model 'Keyword' db.delete_table('mozspaces_keyword') # Deleting model 'Photo' db.delete_table('mozspaces_photo') models = {", "('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Permission']\", 'symmetrical': 'False',", "import datetime from south.db import db from south.v2 import SchemaMigration from django.db import", "{'max_length': '100'}) }, 'mozspaces.photo': { 'Meta': {'object_name': 'Photo'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),", "'Meta': {'object_name': 'Photo'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mozspace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'photos'\",", "('django.db.models.fields.CharField', [], {'max_length': '5'}), 'cover_photo': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': \"'featured_mozspace'\", 'null': 'True',", "models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),", "from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'MozSpace'", "{'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name':", "'False'}), 'is_superuser': 
('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 11,", "blank=True)), ('city', self.gf('django.db.models.fields.CharField')(max_length=100)), ('country', self.gf('django.db.models.fields.CharField')(max_length=5)), ('timezone', self.gf('django.db.models.fields.CharField')(max_length=100)), ('lon', self.gf('django.db.models.fields.FloatField')()), ('lat', self.gf('django.db.models.fields.FloatField')()), ('phone', self.gf('django.db.models.fields.CharField')(default='',", "'100'}), 'phone': ('django.db.models.fields.CharField', [], {'default': \"''\", 'max_length': '100', 'blank': 'True'}), 'region': ('django.db.models.fields.CharField', [],", "('cover_photo', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='featured_mozspace', null=True, to=orm['mozspaces.Photo'])), )) db.send_create_signal('mozspaces', ['MozSpace']) # Adding model 'Keyword' db.create_table('mozspaces_keyword',", "[], {'default': \"''\", 'max_length': '100', 'blank': 'True'}), 'timezone': ('django.db.models.fields.CharField', [], {'max_length': '100'}) },", "'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': \"('name',)\", 'unique_together': \"(('app_label', 'model'),)\", 'object_name': 'ContentType',", "'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': \"('content_type__app_label', 'content_type__model', 'codename')\", 'unique_together': \"(('content_type', 'codename'),)\",", "\"(('app_label', 'model'),)\", 'object_name': 'ContentType', 'db_table': \"'django_content_type'\"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField',", "{'default': \"''\", 'max_length': '100', 'blank': 'True'}), 'region': ('django.db.models.fields.CharField', [], {'default': \"''\", 'max_length': '100',", "'id': ('django.db.models.fields.AutoField', [], 
{'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [],", "('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['contenttypes.ContentType']\"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key':", "[], {'default': 'datetime.datetime(2013, 2, 11, 5, 41, 51, 842704)'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length':", "('django.db.models.fields.CharField', [], {'default': \"''\", 'max_length': '100', 'blank': 'True'}), 'timezone': ('django.db.models.fields.CharField', [], {'max_length': '100'})", "\"''\", 'max_length': '100', 'blank': 'True'}), 'region': ('django.db.models.fields.CharField', [], {'default': \"''\", 'max_length': '100', 'blank':", "'blank': 'True'}), 'timezone': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'mozspaces.photo': { 'Meta': {'object_name': 'Photo'},", "'cover_photo': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': \"'featured_mozspace'\", 'null': 'True', 'to': \"orm['mozspaces.Photo']\"}), 'email': ('django.db.models.fields.EmailField',", "{'max_length': '300'}), 'city': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['auth.User']\"}), 'country':", "'mozspaces.keyword': { 'Meta': {'object_name': 'Keyword'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'keyword': ('django.db.models.fields.CharField', [],", "('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': \"'featured_mozspace'\", 'null': 'True', 'to': \"orm['mozspaces.Photo']\"}), 'email': ('django.db.models.fields.EmailField', [],", "'30', 'blank': 'True'}), 'password': ('<PASSWORD>.db.models.fields.CharField', [], {'max_length': '128'}), 
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Permission']\",", "\"''\", 'max_length': '75', 'blank': 'True'}), 'extra_text': ('django.db.models.fields.TextField', [], {'default': \"''\", 'blank': 'True'}), 'id':", "('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),", "'100', 'blank': 'True'}), 'timezone': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'mozspaces.photo': { 'Meta': {'object_name':", "('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length':", "max_length=100, blank=True)), ('email', self.gf('django.db.models.fields.EmailField')(default='', max_length=75, blank=True)), ('coordinator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])), ('extra_text', self.gf('django.db.models.fields.TextField')(default='', blank=True)), ('cover_photo', self.gf('django.db.models.fields.related.ForeignKey')(blank=True,", "'unique_together': \"(('content_type', 'codename'),)\", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [],", "[], {'default': \"''\", 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'lat': ('django.db.models.fields.FloatField', [],", "'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': \"('content_type__app_label', 'content_type__model', 'codename')\", 'unique_together':", "blank=True)), ('coordinator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])), ('extra_text', self.gf('django.db.models.fields.TextField')(default='', blank=True)), ('cover_photo', 
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='featured_mozspace', null=True, to=orm['mozspaces.Photo'])), )) db.send_create_signal('mozspaces',", "[], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['contenttypes.ContentType']\"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),", "842643)'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('<PASSWORD>.db.models.fields.CharField', [], {'max_length': '128'}),", "'db_table': \"'django_content_type'\"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model':", "'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [],", "{ 'Meta': {'ordering': \"('name',)\", 'unique_together': \"(('app_label', 'model'),)\", 'object_name': 'ContentType', 'db_table': \"'django_content_type'\"}, 'app_label': ('django.db.models.fields.CharField',", "'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [],", "db.send_create_signal('mozspaces', ['Keyword']) # Adding model 'Photo' db.create_table('mozspaces_photo', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('photofile', self.gf('django.db.models.fields.files.ImageField')(max_length=100)), ('mozspace',", "('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mozspace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'photos'\", 'to': \"orm['mozspaces.MozSpace']\"}), 'photofile': ('django.db.models.fields.files.ImageField',", "'user_permissions': 
('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique':", "('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta':", "'codename'),)\", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['contenttypes.ContentType']\"}),", "['MozSpace']) # Adding model 'Keyword' db.create_table('mozspaces_keyword', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('keyword', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)), ('mozspace',", "('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length':", "'True'}), 'mozspace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'photos'\", 'to': \"orm['mozspaces.MozSpace']\"}), 'photofile': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'})", "\"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) },", "south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration):", "south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): #", "django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'MozSpace' db.create_table('mozspaces_mozspace',", "self.gf('django.db.models.fields.related.ForeignKey')(related_name='keywords', to=orm['mozspaces.MozSpace'])), )) 
db.send_create_signal('mozspaces', ['Keyword']) # Adding model 'Photo' db.create_table('mozspaces_photo', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),", "self.gf('django.db.models.fields.CharField')(max_length=100)), ('country', self.gf('django.db.models.fields.CharField')(max_length=5)), ('timezone', self.gf('django.db.models.fields.CharField')(max_length=100)), ('lon', self.gf('django.db.models.fields.FloatField')()), ('lat', self.gf('django.db.models.fields.FloatField')()), ('phone', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)),", "orm): # Adding model 'MozSpace' db.create_table('mozspaces_mozspace', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('address', self.gf('django.db.models.fields.CharField')(max_length=300)),", "[], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default':", "self.gf('django.db.models.fields.EmailField')(default='', max_length=75, blank=True)), ('coordinator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])), ('extra_text', self.gf('django.db.models.fields.TextField')(default='', blank=True)), ('cover_photo', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='featured_mozspace', null=True, to=orm['mozspaces.Photo'])),", "'mozspaces.photo': { 'Meta': {'object_name': 'Photo'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mozspace': ('django.db.models.fields.related.ForeignKey', [],", "'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mozspace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'photos'\", 'to': \"orm['mozspaces.MozSpace']\"}), 'photofile':", "'5'}), 'cover_photo': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 
'related_name': \"'featured_mozspace'\", 'null': 'True', 'to': \"orm['mozspaces.Photo']\"}), 'email':", "Deleting model 'Keyword' db.delete_table('mozspaces_keyword') # Deleting model 'Photo' db.delete_table('mozspaces_photo') models = { 'auth.group':", "'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta':", "'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': \"('name',)\",", "41, 51, 842643)'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('<PASSWORD>.db.models.fields.CharField', [],", "'contenttypes.contenttype': { 'Meta': {'ordering': \"('name',)\", 'unique_together': \"(('app_label', 'model'),)\", 'object_name': 'ContentType', 'db_table': \"'django_content_type'\"}, 'app_label':", "{'default': \"''\", 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'lat': ('django.db.models.fields.FloatField', [], {}),", "# Adding model 'MozSpace' db.create_table('mozspaces_mozspace', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('address', self.gf('django.db.models.fields.CharField')(max_length=300)), ('region',", "'email': ('django.db.models.fields.EmailField', [], {'default': \"''\", 'max_length': '75', 'blank': 'True'}), 'extra_text': ('django.db.models.fields.TextField', [], {'default':", "('mozspace', self.gf('django.db.models.fields.related.ForeignKey')(related_name='photos', to=orm['mozspaces.MozSpace'])), )) db.send_create_signal('mozspaces', ['Photo']) def backwards(self, orm): # Deleting model 'MozSpace'", "max_length=75, blank=True)), ('coordinator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])), ('extra_text', 
self.gf('django.db.models.fields.TextField')(default='', blank=True)), ('cover_photo', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='featured_mozspace', null=True, to=orm['mozspaces.Photo'])), ))", "'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions':", "'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['contenttypes.ContentType']\"}), 'id': ('django.db.models.fields.AutoField',", "('mozspace', self.gf('django.db.models.fields.related.ForeignKey')(related_name='keywords', to=orm['mozspaces.MozSpace'])), )) db.send_create_signal('mozspaces', ['Keyword']) # Adding model 'Photo' db.create_table('mozspaces_photo', ( ('id',", "{'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': \"('name',)\", 'unique_together': \"(('app_label', 'model'),)\",", "{'blank': 'True', 'related_name': \"'featured_mozspace'\", 'null': 'True', 'to': \"orm['mozspaces.Photo']\"}), 'email': ('django.db.models.fields.EmailField', [], {'default': \"''\",", "('region', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)), ('city', self.gf('django.db.models.fields.CharField')(max_length=100)), ('country', self.gf('django.db.models.fields.CharField')(max_length=5)), ('timezone', self.gf('django.db.models.fields.CharField')(max_length=100)), ('lon', self.gf('django.db.models.fields.FloatField')()), ('lat',", "model 'MozSpace' db.create_table('mozspaces_mozspace', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('address', self.gf('django.db.models.fields.CharField')(max_length=300)), ('region', self.gf('django.db.models.fields.CharField')(default='', 
max_length=100,", "'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Group']\", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField',", "[], {'max_length': '5'}), 'cover_photo': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': \"'featured_mozspace'\", 'null': 'True', 'to':", "'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 11, 5,", "{'to': \"orm['auth.User']\"}), 'country': ('django.db.models.fields.CharField', [], {'max_length': '5'}), 'cover_photo': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name':", "{'max_length': '5'}), 'cover_photo': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': \"'featured_mozspace'\", 'null': 'True', 'to': \"orm['mozspaces.Photo']\"}),", "'128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [],", "db.send_create_signal('mozspaces', ['MozSpace']) # Adding model 'Keyword' db.create_table('mozspaces_keyword', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('keyword', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),", "{'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to':", "('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [],", "'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': 
\"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission':", "51, 842643)'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('<PASSWORD>.db.models.fields.CharField', [], {'max_length':", "'related_name': \"'featured_mozspace'\", 'null': 'True', 'to': \"orm['mozspaces.Photo']\"}), 'email': ('django.db.models.fields.EmailField', [], {'default': \"''\", 'max_length': '75',", "('django.db.models.fields.CharField', [], {'max_length': '100'}), 'phone': ('django.db.models.fields.CharField', [], {'default': \"''\", 'max_length': '100', 'blank': 'True'}),", "'to': \"orm['mozspaces.MozSpace']\"}) }, 'mozspaces.mozspace': { 'Meta': {'object_name': 'MozSpace'}, 'address': ('django.db.models.fields.CharField', [], {'max_length': '300'}),", "import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding", "{'object_name': 'Photo'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mozspace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'photos'\", 'to':", "('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['auth.User']\"}), 'country': ('django.db.models.fields.CharField', [], {'max_length': '5'}), 'cover_photo': ('django.db.models.fields.related.ForeignKey', [], {'blank':", "{ 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 11, 5, 41,", "('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'lat': ('django.db.models.fields.FloatField', [], {}), 'lon': ('django.db.models.fields.FloatField', [], {}), 'name':", "def forwards(self, orm): # Adding model 'MozSpace' db.create_table('mozspaces_mozspace', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)),", "'True'}), 'groups': 
('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Group']\", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [],", "\"'keywords'\", 'to': \"orm['mozspaces.MozSpace']\"}) }, 'mozspaces.mozspace': { 'Meta': {'object_name': 'MozSpace'}, 'address': ('django.db.models.fields.CharField', [], {'max_length':", "'True'}) }, 'auth.permission': { 'Meta': {'ordering': \"('content_type__app_label', 'content_type__model', 'codename')\", 'unique_together': \"(('content_type', 'codename'),)\", 'object_name':", "'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('<PASSWORD>.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions':", "'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined':", "'True'}), 'timezone': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'mozspaces.photo': { 'Meta': {'object_name': 'Photo'}, 'id':", "'timezone': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'mozspaces.photo': { 'Meta': {'object_name': 'Photo'}, 'id': ('django.db.models.fields.AutoField',", "['Keyword']) # Adding model 'Photo' db.create_table('mozspaces_photo', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('photofile', self.gf('django.db.models.fields.files.ImageField')(max_length=100)), ('mozspace', self.gf('django.db.models.fields.related.ForeignKey')(related_name='photos',", "('city', self.gf('django.db.models.fields.CharField')(max_length=100)), ('country', self.gf('django.db.models.fields.CharField')(max_length=5)), ('timezone', self.gf('django.db.models.fields.CharField')(max_length=100)), ('lon', self.gf('django.db.models.fields.FloatField')()), ('lat', self.gf('django.db.models.fields.FloatField')()), ('phone', self.gf('django.db.models.fields.CharField')(default='', max_length=100,", 
"db.delete_table('mozspaces_mozspace') # Deleting model 'Keyword' db.delete_table('mozspaces_keyword') # Deleting model 'Photo' db.delete_table('mozspaces_photo') models =", "'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 11, 5, 41, 51, 842643)'}), 'last_name':", "'phone': ('django.db.models.fields.CharField', [], {'default': \"''\", 'max_length': '100', 'blank': 'True'}), 'region': ('django.db.models.fields.CharField', [], {'default':", "'75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [],", ")) db.send_create_signal('mozspaces', ['Photo']) def backwards(self, orm): # Deleting model 'MozSpace' db.delete_table('mozspaces_mozspace') # Deleting", "self.gf('django.db.models.fields.FloatField')()), ('lat', self.gf('django.db.models.fields.FloatField')()), ('phone', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)), ('email', self.gf('django.db.models.fields.EmailField')(default='', max_length=75, blank=True)), ('coordinator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),", "('django.db.models.fields.CharField', [], {'default': \"''\", 'max_length': '100', 'blank': 'True'}), 'region': ('django.db.models.fields.CharField', [], {'default': \"''\",", "self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])), ('extra_text', self.gf('django.db.models.fields.TextField')(default='', blank=True)), ('cover_photo', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='featured_mozspace', null=True, to=orm['mozspaces.Photo'])), )) db.send_create_signal('mozspaces', ['MozSpace']) #", "[], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Group']\", 'symmetrical': 'False', 'blank':", "'100'}) }, 
'mozspaces.photo': { 'Meta': {'object_name': 'Photo'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mozspace':", "\"'featured_mozspace'\", 'null': 'True', 'to': \"orm['mozspaces.Photo']\"}), 'email': ('django.db.models.fields.EmailField', [], {'default': \"''\", 'max_length': '75', 'blank':", "{'default': \"''\", 'max_length': '75', 'blank': 'True'}), 'extra_text': ('django.db.models.fields.TextField', [], {'default': \"''\", 'blank': 'True'}),", "'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [],", "class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'MozSpace' db.create_table('mozspaces_mozspace', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),", "('django.db.models.fields.FloatField', [], {}), 'lon': ('django.db.models.fields.FloatField', [], {}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'phone':", "[], {'blank': 'True', 'related_name': \"'featured_mozspace'\", 'null': 'True', 'to': \"orm['mozspaces.Photo']\"}), 'email': ('django.db.models.fields.EmailField', [], {'default':", "'True', 'max_length': '50'}), 'mozspace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'keywords'\", 'to': \"orm['mozspaces.MozSpace']\"}) }, 'mozspaces.mozspace': {", "'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': \"('content_type__app_label', 'content_type__model', 'codename')\", 'unique_together': \"(('content_type',", "[], {'max_length': '30', 'blank': 'True'}), 'password': ('<PASSWORD>.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [],", "'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': 
('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Permission']\",", "self.gf('django.db.models.fields.TextField')(default='', blank=True)), ('cover_photo', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='featured_mozspace', null=True, to=orm['mozspaces.Photo'])), )) db.send_create_signal('mozspaces', ['MozSpace']) # Adding model", "'max_length': '100', 'blank': 'True'}), 'timezone': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'mozspaces.photo': { 'Meta':", "'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['contenttypes.ContentType']\"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [],", "\"orm['auth.Group']\", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [],", "('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('photofile', self.gf('django.db.models.fields.files.ImageField')(max_length=100)), ('mozspace', self.gf('django.db.models.fields.related.ForeignKey')(related_name='photos', to=orm['mozspaces.MozSpace'])), )) db.send_create_signal('mozspaces', ['Photo']) def backwards(self, orm):", "'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['auth.User']\"}), 'country': ('django.db.models.fields.CharField', [], {'max_length': '5'}), 'cover_photo': ('django.db.models.fields.related.ForeignKey', [],", "encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from", "'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank':", "'Meta': {'ordering': \"('content_type__app_label', 'content_type__model', 'codename')\", 'unique_together': \"(('content_type', 
'codename'),)\", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [],", "('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'mozspaces.keyword': { 'Meta':", "'Keyword' db.create_table('mozspaces_keyword', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('keyword', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)), ('mozspace', self.gf('django.db.models.fields.related.ForeignKey')(related_name='keywords', to=orm['mozspaces.MozSpace'])), )) db.send_create_signal('mozspaces',", "{'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'},", "[], {'primary_key': 'True'}), 'keyword': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}), 'mozspace': ('django.db.models.fields.related.ForeignKey', [],", "null=True, to=orm['mozspaces.Photo'])), )) db.send_create_signal('mozspaces', ['MozSpace']) # Adding model 'Keyword' db.create_table('mozspaces_keyword', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),", "{'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) },", "{'max_length': '100'}), 'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['auth.User']\"}), 'country': ('django.db.models.fields.CharField', [], {'max_length': '5'}), 'cover_photo':", "self.gf('django.db.models.fields.related.ForeignKey')(related_name='photos', to=orm['mozspaces.MozSpace'])), )) db.send_create_signal('mozspaces', ['Photo']) def backwards(self, orm): # Deleting model 'MozSpace' db.delete_table('mozspaces_mozspace')", "{'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 11, 
5, 41, 51, 842643)'}),", "('django.db.models.fields.CharField', [], {'max_length': '100'}), 'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['auth.User']\"}), 'country': ('django.db.models.fields.CharField', [], {'max_length':", "'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'}) },", "'codename')\", 'unique_together': \"(('content_type', 'codename'),)\", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey',", "'datetime.datetime(2013, 2, 11, 5, 41, 51, 842704)'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank':", "Deleting model 'MozSpace' db.delete_table('mozspaces_mozspace') # Deleting model 'Keyword' db.delete_table('mozspaces_keyword') # Deleting model 'Photo'", "'auth.permission': { 'Meta': {'ordering': \"('content_type__app_label', 'content_type__model', 'codename')\", 'unique_together': \"(('content_type', 'codename'),)\", 'object_name': 'Permission'}, 'codename':", "self.gf('django.db.models.fields.CharField')(max_length=100)), ('lon', self.gf('django.db.models.fields.FloatField')()), ('lat', self.gf('django.db.models.fields.FloatField')()), ('phone', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)), ('email', self.gf('django.db.models.fields.EmailField')(default='', max_length=75, blank=True)),", "'max_length': '75', 'blank': 'True'}), 'extra_text': ('django.db.models.fields.TextField', [], {'default': \"''\", 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField',", "'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 11, 5, 41, 51, 842643)'}), 'last_name': ('django.db.models.fields.CharField',", "self.gf('django.db.models.fields.AutoField')(primary_key=True)), 
('keyword', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)), ('mozspace', self.gf('django.db.models.fields.related.ForeignKey')(related_name='keywords', to=orm['mozspaces.MozSpace'])), )) db.send_create_signal('mozspaces', ['Keyword']) # Adding model", "[], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [],", "to=orm['mozspaces.MozSpace'])), )) db.send_create_signal('mozspaces', ['Keyword']) # Adding model 'Photo' db.create_table('mozspaces_photo', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('photofile',", "'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'lat': ('django.db.models.fields.FloatField', [], {}), 'lon': ('django.db.models.fields.FloatField', [], {}),", "{'to': \"orm['auth.Group']\", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField',", "('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}), 'mozspace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'keywords'\", 'to': \"orm['mozspaces.MozSpace']\"})", "self.gf('django.db.models.fields.CharField')(max_length=100)), ('address', self.gf('django.db.models.fields.CharField')(max_length=300)), ('region', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)), ('city', self.gf('django.db.models.fields.CharField')(max_length=100)), ('country', self.gf('django.db.models.fields.CharField')(max_length=5)), ('timezone', self.gf('django.db.models.fields.CharField')(max_length=100)),", "'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'mozspaces.keyword':", "('country', 
self.gf('django.db.models.fields.CharField')(max_length=5)), ('timezone', self.gf('django.db.models.fields.CharField')(max_length=100)), ('lon', self.gf('django.db.models.fields.FloatField')()), ('lat', self.gf('django.db.models.fields.FloatField')()), ('phone', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)), ('email',", "('address', self.gf('django.db.models.fields.CharField')(max_length=300)), ('region', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)), ('city', self.gf('django.db.models.fields.CharField')(max_length=100)), ('country', self.gf('django.db.models.fields.CharField')(max_length=5)), ('timezone', self.gf('django.db.models.fields.CharField')(max_length=100)), ('lon',", "'True'}), 'keyword': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}), 'mozspace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'keywords'\",", "'mozspace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'keywords'\", 'to': \"orm['mozspaces.MozSpace']\"}) }, 'mozspaces.mozspace': { 'Meta': {'object_name': 'MozSpace'},", "('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('<PASSWORD>.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField',", "\"orm['mozspaces.MozSpace']\"}) }, 'mozspaces.mozspace': { 'Meta': {'object_name': 'MozSpace'}, 'address': ('django.db.models.fields.CharField', [], {'max_length': '300'}), 'city':", "['Photo']) def backwards(self, orm): # Deleting model 'MozSpace' db.delete_table('mozspaces_mozspace') # Deleting model 'Keyword'", "{'object_name': 'Keyword'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'keyword': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length':", "{ 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField',", "'max_length': '50'}), 'mozspace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'keywords'\", 'to': \"orm['mozspaces.MozSpace']\"}) }, 'mozspaces.mozspace': { 'Meta':", ")) db.send_create_signal('mozspaces', ['Keyword']) # Adding model 'Photo' db.create_table('mozspaces_photo', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('photofile', self.gf('django.db.models.fields.files.ImageField')(max_length=100)),", "{'related_name': \"'photos'\", 'to': \"orm['mozspaces.MozSpace']\"}), 'photofile': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}) } } complete_apps =", "[], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Permission']\", 'symmetrical': 'False', 'blank':", "'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': {", "'Keyword'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'keyword': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),", "db.delete_table('mozspaces_photo') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key':", "\"orm['contenttypes.ContentType']\"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user':", "'75', 'blank': 'True'}), 'extra_text': ('django.db.models.fields.TextField', [], {'default': \"''\", 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [],", "{'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 
'False'}), 'is_superuser':", "'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'lat': ('django.db.models.fields.FloatField', [], {}), 'lon': ('django.db.models.fields.FloatField', [],", "'True'}), 'password': ('<PASSWORD>.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Permission']\", 'symmetrical': 'False',", "'Meta': {'ordering': \"('name',)\", 'unique_together': \"(('app_label', 'model'),)\", 'object_name': 'ContentType', 'db_table': \"'django_content_type'\"}, 'app_label': ('django.db.models.fields.CharField', [],", "{'max_length': '100'}) }, 'mozspaces.keyword': { 'Meta': {'object_name': 'Keyword'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),", "'80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': {", "[], {'related_name': \"'keywords'\", 'to': \"orm['mozspaces.MozSpace']\"}) }, 'mozspaces.mozspace': { 'Meta': {'object_name': 'MozSpace'}, 'address': ('django.db.models.fields.CharField',", "'max_length': '100', 'blank': 'True'}), 'region': ('django.db.models.fields.CharField', [], {'default': \"''\", 'max_length': '100', 'blank': 'True'}),", "[], {'max_length': '300'}), 'city': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['auth.User']\"}),", "'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta':", "{'primary_key': 'True'}), 'lat': ('django.db.models.fields.FloatField', [], {}), 'lon': ('django.db.models.fields.FloatField', [], {}), 'name': ('django.db.models.fields.CharField', [],", "'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], 
{'default': 'datetime.datetime(2013, 2, 11, 5, 41, 51,", "self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)), ('mozspace', self.gf('django.db.models.fields.related.ForeignKey')(related_name='keywords', to=orm['mozspaces.MozSpace'])), )) db.send_create_signal('mozspaces', ['Keyword']) # Adding model 'Photo' db.create_table('mozspaces_photo',", "db.create_table('mozspaces_photo', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('photofile', self.gf('django.db.models.fields.files.ImageField')(max_length=100)), ('mozspace', self.gf('django.db.models.fields.related.ForeignKey')(related_name='photos', to=orm['mozspaces.MozSpace'])), )) db.send_create_signal('mozspaces', ['Photo']) def", "self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='featured_mozspace', null=True, to=orm['mozspaces.Photo'])), )) db.send_create_signal('mozspaces', ['MozSpace']) # Adding model 'Keyword' db.create_table('mozspaces_keyword', (", "'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['contenttypes.ContentType']\"}), 'id': ('django.db.models.fields.AutoField', [],", "'lat': ('django.db.models.fields.FloatField', [], {}), 'lon': ('django.db.models.fields.FloatField', [], {}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),", "Adding model 'Keyword' db.create_table('mozspaces_keyword', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('keyword', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)), ('mozspace', self.gf('django.db.models.fields.related.ForeignKey')(related_name='keywords', to=orm['mozspaces.MozSpace'])),", "('django.db.models.fields.CharField', [], {'max_length': '300'}), 'city': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to':", 
"self.gf('django.db.models.fields.CharField')(max_length=5)), ('timezone', self.gf('django.db.models.fields.CharField')(max_length=100)), ('lon', self.gf('django.db.models.fields.FloatField')()), ('lat', self.gf('django.db.models.fields.FloatField')()), ('phone', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)), ('email', self.gf('django.db.models.fields.EmailField')(default='',", "('extra_text', self.gf('django.db.models.fields.TextField')(default='', blank=True)), ('cover_photo', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='featured_mozspace', null=True, to=orm['mozspaces.Photo'])), )) db.send_create_signal('mozspaces', ['MozSpace']) # Adding", "model 'Keyword' db.delete_table('mozspaces_keyword') # Deleting model 'Photo' db.delete_table('mozspaces_photo') models = { 'auth.group': {", "('photofile', self.gf('django.db.models.fields.files.ImageField')(max_length=100)), ('mozspace', self.gf('django.db.models.fields.related.ForeignKey')(related_name='photos', to=orm['mozspaces.MozSpace'])), )) db.send_create_signal('mozspaces', ['Photo']) def backwards(self, orm): # Deleting", "'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField',", "'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'keyword': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}), 'mozspace':", "('django.db.models.fields.FloatField', [], {}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'phone': ('django.db.models.fields.CharField', [], {'default': \"''\",", "'blank': 'True'}), 'extra_text': ('django.db.models.fields.TextField', [], {'default': \"''\", 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key':", "('timezone', 
self.gf('django.db.models.fields.CharField')(max_length=100)), ('lon', self.gf('django.db.models.fields.FloatField')()), ('lat', self.gf('django.db.models.fields.FloatField')()), ('phone', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)), ('email', self.gf('django.db.models.fields.EmailField')(default='', max_length=75,", "datetime from south.db import db from south.v2 import SchemaMigration from django.db import models", "[], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),", "{'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length':", "('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default':", "\"'django_content_type'\"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField',", "('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True',", "'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['contenttypes.ContentType']\"}), 'id':", "'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),", "[], {'to': \"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'}) }, 
'auth.permission': { 'Meta': {'ordering': \"('content_type__app_label',", "('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField',", "'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField',", "{ 'Meta': {'object_name': 'Keyword'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'keyword': ('django.db.models.fields.CharField', [], {'unique':", "Adding model 'MozSpace' db.create_table('mozspaces_mozspace', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('address', self.gf('django.db.models.fields.CharField')(max_length=300)), ('region', self.gf('django.db.models.fields.CharField')(default='',", "('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 11, 5, 41, 51, 842643)'}), 'last_name': ('django.db.models.fields.CharField', [],", "db.send_create_signal('mozspaces', ['Photo']) def backwards(self, orm): # Deleting model 'MozSpace' db.delete_table('mozspaces_mozspace') # Deleting model", "'Photo' db.create_table('mozspaces_photo', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('photofile', self.gf('django.db.models.fields.files.ImageField')(max_length=100)), ('mozspace', self.gf('django.db.models.fields.related.ForeignKey')(related_name='photos', to=orm['mozspaces.MozSpace'])), )) db.send_create_signal('mozspaces', ['Photo'])", "self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)), ('email', self.gf('django.db.models.fields.EmailField')(default='', max_length=75, blank=True)), ('coordinator', 
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])), ('extra_text', self.gf('django.db.models.fields.TextField')(default='', blank=True)), ('cover_photo',", "('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['contenttypes.ContentType']\"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length':", "[], {'primary_key': 'True'}), 'mozspace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'photos'\", 'to': \"orm['mozspaces.MozSpace']\"}), 'photofile': ('django.db.models.fields.files.ImageField', [],", "[], {'to': \"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length':", "{'default': 'datetime.datetime(2013, 2, 11, 5, 41, 51, 842643)'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30',", "('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default':", "[], {'default': \"''\", 'max_length': '100', 'blank': 'True'}), 'region': ('django.db.models.fields.CharField', [], {'default': \"''\", 'max_length':", "{}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'phone': ('django.db.models.fields.CharField', [], {'default': \"''\", 'max_length': '100',", "('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'keyword': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}), 'mozspace': ('django.db.models.fields.related.ForeignKey',", "('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('address', self.gf('django.db.models.fields.CharField')(max_length=300)), ('region', 
self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)), ('city', self.gf('django.db.models.fields.CharField')(max_length=100)), ('country',", "('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('keyword', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)), ('mozspace', self.gf('django.db.models.fields.related.ForeignKey')(related_name='keywords', to=orm['mozspaces.MozSpace'])), )) db.send_create_signal('mozspaces', ['Keyword']) # Adding", "'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField',", "'mozspaces.mozspace': { 'Meta': {'object_name': 'MozSpace'}, 'address': ('django.db.models.fields.CharField', [], {'max_length': '300'}), 'city': ('django.db.models.fields.CharField', [],", "'Photo'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mozspace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'photos'\", 'to': \"orm['mozspaces.MozSpace']\"}),", "'unique_together': \"(('app_label', 'model'),)\", 'object_name': 'ContentType', 'db_table': \"'django_content_type'\"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id':", "# Adding model 'Photo' db.create_table('mozspaces_photo', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('photofile', self.gf('django.db.models.fields.files.ImageField')(max_length=100)), ('mozspace', self.gf('django.db.models.fields.related.ForeignKey')(related_name='photos', to=orm['mozspaces.MozSpace'])),", "{}), 'lon': ('django.db.models.fields.FloatField', [], {}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'phone': ('django.db.models.fields.CharField', [],", "'blank': 'True'}), 'region': ('django.db.models.fields.CharField', [], {'default': \"''\", 'max_length': '100', 
'blank': 'True'}), 'timezone': ('django.db.models.fields.CharField',", "'50'}), 'mozspace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'keywords'\", 'to': \"orm['mozspaces.MozSpace']\"}) }, 'mozspaces.mozspace': { 'Meta': {'object_name':", "{ 'Meta': {'object_name': 'Photo'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mozspace': ('django.db.models.fields.related.ForeignKey', [], {'related_name':", "\"'photos'\", 'to': \"orm['mozspaces.MozSpace']\"}), 'photofile': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}) } } complete_apps = ['mozspaces']", "}, 'mozspaces.keyword': { 'Meta': {'object_name': 'Keyword'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'keyword': ('django.db.models.fields.CharField',", "'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True',", "def backwards(self, orm): # Deleting model 'MozSpace' db.delete_table('mozspaces_mozspace') # Deleting model 'Keyword' db.delete_table('mozspaces_keyword')", "SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model", "('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Group']\", 'symmetrical': 'False',", "self.gf('django.db.models.fields.FloatField')()), ('phone', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)), ('email', self.gf('django.db.models.fields.EmailField')(default='', max_length=75, blank=True)), ('coordinator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])), ('extra_text', self.gf('django.db.models.fields.TextField')(default='',", "[], {'default': \"''\", 'max_length': '75', 'blank': 'True'}), 'extra_text': 
('django.db.models.fields.TextField', [], {'default': \"''\", 'blank':", "max_length=100, blank=True)), ('city', self.gf('django.db.models.fields.CharField')(max_length=100)), ('country', self.gf('django.db.models.fields.CharField')(max_length=5)), ('timezone', self.gf('django.db.models.fields.CharField')(max_length=100)), ('lon', self.gf('django.db.models.fields.FloatField')()), ('lat', self.gf('django.db.models.fields.FloatField')()), ('phone',", "'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering':", "'password': ('<PASSWORD>.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Permission']\", 'symmetrical': 'False', 'blank':", "{'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login':", "'Meta': {'object_name': 'MozSpace'}, 'address': ('django.db.models.fields.CharField', [], {'max_length': '300'}), 'city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),", "'100'}), 'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['auth.User']\"}), 'country': ('django.db.models.fields.CharField', [], {'max_length': '5'}), 'cover_photo': ('django.db.models.fields.related.ForeignKey',", "\"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': \"('content_type__app_label', 'content_type__model', 'codename')\",", "('django.db.models.fields.EmailField', [], {'default': \"''\", 'max_length': '75', 'blank': 'True'}), 'extra_text': ('django.db.models.fields.TextField', [], {'default': \"''\",", "'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Group']\", 'symmetrical': 'False', 'blank': 'True'}), 'id': 
('django.db.models.fields.AutoField', [], {'primary_key':", "{'object_name': 'MozSpace'}, 'address': ('django.db.models.fields.CharField', [], {'max_length': '300'}), 'city': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'coordinator':", "('coordinator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])), ('extra_text', self.gf('django.db.models.fields.TextField')(default='', blank=True)), ('cover_photo', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='featured_mozspace', null=True, to=orm['mozspaces.Photo'])), )) db.send_create_signal('mozspaces', ['MozSpace'])", "{ 'Meta': {'ordering': \"('content_type__app_label', 'content_type__model', 'codename')\", 'unique_together': \"(('content_type', 'codename'),)\", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField',", "'Keyword' db.delete_table('mozspaces_keyword') # Deleting model 'Photo' db.delete_table('mozspaces_photo') models = { 'auth.group': { 'Meta':", "[], {'max_length': '100'}) }, 'mozspaces.keyword': { 'Meta': {'object_name': 'Keyword'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key':", "db.delete_table('mozspaces_keyword') # Deleting model 'Photo' db.delete_table('mozspaces_photo') models = { 'auth.group': { 'Meta': {'object_name':", "('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': \"('name',)\", 'unique_together':", "842704)'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30',", "'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 11, 5, 41, 51, 842704)'}), 'email': ('django.db.models.fields.EmailField',", "{'related_name': \"'keywords'\", 'to': \"orm['mozspaces.MozSpace']\"}) }, 'mozspaces.mozspace': { 'Meta': {'object_name': 
'MozSpace'}, 'address': ('django.db.models.fields.CharField', [],", "'100', 'blank': 'True'}), 'region': ('django.db.models.fields.CharField', [], {'default': \"''\", 'max_length': '100', 'blank': 'True'}), 'timezone':", "{ 'Meta': {'object_name': 'MozSpace'}, 'address': ('django.db.models.fields.CharField', [], {'max_length': '300'}), 'city': ('django.db.models.fields.CharField', [], {'max_length':", "'MozSpace'}, 'address': ('django.db.models.fields.CharField', [], {'max_length': '300'}), 'city': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'coordinator': ('django.db.models.fields.related.ForeignKey',", "{'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['contenttypes.ContentType']\"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name':", "( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('photofile', self.gf('django.db.models.fields.files.ImageField')(max_length=100)), ('mozspace', self.gf('django.db.models.fields.related.ForeignKey')(related_name='photos', to=orm['mozspaces.MozSpace'])), )) db.send_create_signal('mozspaces', ['Photo']) def backwards(self,", "{'ordering': \"('content_type__app_label', 'content_type__model', 'codename')\", 'unique_together': \"(('content_type', 'codename'),)\", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length':", "[], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name':", "[], {'to': \"orm['auth.Group']\", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active':", "blank=True)), ('cover_photo', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='featured_mozspace', null=True, to=orm['mozspaces.Photo'])), )) db.send_create_signal('mozspaces', ['MozSpace']) # Adding model 
'Keyword'", "'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField',", "[], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups':", "import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def", "'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype':", "'address': ('django.db.models.fields.CharField', [], {'max_length': '300'}), 'city': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'coordinator': ('django.db.models.fields.related.ForeignKey', [],", "to=orm['mozspaces.Photo'])), )) db.send_create_signal('mozspaces', ['MozSpace']) # Adding model 'Keyword' db.create_table('mozspaces_keyword', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('keyword',", "blank=True)), ('email', self.gf('django.db.models.fields.EmailField')(default='', max_length=75, blank=True)), ('coordinator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])), ('extra_text', self.gf('django.db.models.fields.TextField')(default='', blank=True)), ('cover_photo', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='featured_mozspace',", "'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Group']\", 'symmetrical':", "'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 11, 5,", "5, 41, 51, 842704)'}), 'email': ('django.db.models.fields.EmailField', [], 
{'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField',", "'content_type__model', 'codename')\", 'unique_together': \"(('content_type', 'codename'),)\", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type':", "{'default': 'datetime.datetime(2013, 2, 11, 5, 41, 51, 842704)'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75',", "('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('address', self.gf('django.db.models.fields.CharField')(max_length=300)), ('region', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)), ('city', self.gf('django.db.models.fields.CharField')(max_length=100)), ('country', self.gf('django.db.models.fields.CharField')(max_length=5)), ('timezone',", "self.gf('django.db.models.fields.CharField')(max_length=300)), ('region', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)), ('city', self.gf('django.db.models.fields.CharField')(max_length=100)), ('country', self.gf('django.db.models.fields.CharField')(max_length=5)), ('timezone', self.gf('django.db.models.fields.CharField')(max_length=100)), ('lon', self.gf('django.db.models.fields.FloatField')()),", "'100'}) }, 'mozspaces.keyword': { 'Meta': {'object_name': 'Keyword'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'keyword':", "'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': {", "\"''\", 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'lat': ('django.db.models.fields.FloatField', [], {}), 'lon':", "'datetime.datetime(2013, 2, 11, 5, 41, 51, 842643)'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank':", "{'max_length': '50'}) }, 'auth.user': { 'Meta': 
{'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013,", "'100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'mozspaces.keyword': { 'Meta': {'object_name': 'Keyword'}, 'id':", "'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [],", "self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)), ('city', self.gf('django.db.models.fields.CharField')(max_length=100)), ('country', self.gf('django.db.models.fields.CharField')(max_length=5)), ('timezone', self.gf('django.db.models.fields.CharField')(max_length=100)), ('lon', self.gf('django.db.models.fields.FloatField')()), ('lat', self.gf('django.db.models.fields.FloatField')()),", "5, 41, 51, 842643)'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('<PASSWORD>.db.models.fields.CharField',", "'True'}), 'lat': ('django.db.models.fields.FloatField', [], {}), 'lon': ('django.db.models.fields.FloatField', [], {}), 'name': ('django.db.models.fields.CharField', [], {'max_length':", "'30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': \"('name',)\", 'unique_together': \"(('app_label', 'model'),)\", 'object_name': 'ContentType', 'db_table':", "self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('address', self.gf('django.db.models.fields.CharField')(max_length=300)), ('region', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)), ('city', self.gf('django.db.models.fields.CharField')(max_length=100)), ('country', self.gf('django.db.models.fields.CharField')(max_length=5)),", "model 'Photo' db.create_table('mozspaces_photo', ( ('id', 
self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('photofile', self.gf('django.db.models.fields.files.ImageField')(max_length=100)), ('mozspace', self.gf('django.db.models.fields.related.ForeignKey')(related_name='photos', to=orm['mozspaces.MozSpace'])), )) db.send_create_signal('mozspaces',", "'MozSpace' db.delete_table('mozspaces_mozspace') # Deleting model 'Keyword' db.delete_table('mozspaces_keyword') # Deleting model 'Photo' db.delete_table('mozspaces_photo') models", "'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'mozspaces.keyword': { 'Meta': {'object_name': 'Keyword'}, 'id': ('django.db.models.fields.AutoField',", "'model'),)\", 'object_name': 'ContentType', 'db_table': \"'django_content_type'\"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [],", "51, 842704)'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length':", "('keyword', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)), ('mozspace', self.gf('django.db.models.fields.related.ForeignKey')(related_name='keywords', to=orm['mozspaces.MozSpace'])), )) db.send_create_signal('mozspaces', ['Keyword']) # Adding model 'Photo'", "[], {'related_name': \"'photos'\", 'to': \"orm['mozspaces.MozSpace']\"}), 'photofile': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}) } } complete_apps", "[], {'to': \"orm['contenttypes.ContentType']\"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})", "('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Group']\", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),", "[], {'max_length': '128'}), 'user_permissions': 
('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'}), 'username':", "[], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})", "db.create_table('mozspaces_mozspace', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('address', self.gf('django.db.models.fields.CharField')(max_length=300)), ('region', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)), ('city',", "('django.db.models.fields.TextField', [], {'default': \"''\", 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'lat': ('django.db.models.fields.FloatField',", "}, 'auth.permission': { 'Meta': {'ordering': \"('content_type__app_label', 'content_type__model', 'codename')\", 'unique_together': \"(('content_type', 'codename'),)\", 'object_name': 'Permission'},", "self.gf('django.db.models.fields.files.ImageField')(max_length=100)), ('mozspace', self.gf('django.db.models.fields.related.ForeignKey')(related_name='photos', to=orm['mozspaces.MozSpace'])), )) db.send_create_signal('mozspaces', ['Photo']) def backwards(self, orm): # Deleting model", "'lon': ('django.db.models.fields.FloatField', [], {}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'phone': ('django.db.models.fields.CharField', [], {'default':", "\"orm['auth.User']\"}), 'country': ('django.db.models.fields.CharField', [], {'max_length': '5'}), 'cover_photo': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': \"'featured_mozspace'\",", "{'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 
'datetime.datetime(2013, 2,", "( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('address', self.gf('django.db.models.fields.CharField')(max_length=300)), ('region', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)), ('city', self.gf('django.db.models.fields.CharField')(max_length=100)),", "('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'mozspaces.photo': { 'Meta': {'object_name': 'Photo'}, 'id': ('django.db.models.fields.AutoField', [],", "'ContentType', 'db_table': \"'django_content_type'\"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),", "( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('keyword', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)), ('mozspace', self.gf('django.db.models.fields.related.ForeignKey')(related_name='keywords', to=orm['mozspaces.MozSpace'])), )) db.send_create_signal('mozspaces', ['Keyword']) #", "\"orm['mozspaces.Photo']\"}), 'email': ('django.db.models.fields.EmailField', [], {'default': \"''\", 'max_length': '75', 'blank': 'True'}), 'extra_text': ('django.db.models.fields.TextField', [],", "('email', self.gf('django.db.models.fields.EmailField')(default='', max_length=75, blank=True)), ('coordinator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])), ('extra_text', self.gf('django.db.models.fields.TextField')(default='', blank=True)), ('cover_photo', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='featured_mozspace', null=True,", "11, 5, 41, 51, 842704)'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name':", "[], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': 
\"('name',)\", 'unique_together': \"(('app_label',", "{'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 11, 5, 41, 51, 842704)'}),", "{ 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique':", "'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'phone': ('django.db.models.fields.CharField', [], {'default': \"''\", 'max_length': '100', 'blank':", "orm): # Deleting model 'MozSpace' db.delete_table('mozspaces_mozspace') # Deleting model 'Keyword' db.delete_table('mozspaces_keyword') # Deleting", "('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 11, 5, 41, 51, 842704)'}), 'email': ('django.db.models.fields.EmailField', [],", "'30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Group']\", 'symmetrical': 'False', 'blank': 'True'}), 'id':", "{'to': \"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})", "'MozSpace' db.create_table('mozspaces_mozspace', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('address', self.gf('django.db.models.fields.CharField')(max_length=300)), ('region', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)),", "'True'}), 'extra_text': ('django.db.models.fields.TextField', [], {'default': \"''\", 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),", "utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db", "'object_name': 'ContentType', 'db_table': \"'django_content_type'\"}, 'app_label': 
('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key':", "db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self,", "[], {'default': 'datetime.datetime(2013, 2, 11, 5, 41, 51, 842643)'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length':", "{'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'})", "{'max_length': '30', 'blank': 'True'}), 'password': ('<PASSWORD>.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to':", "# Deleting model 'MozSpace' db.delete_table('mozspaces_mozspace') # Deleting model 'Keyword' db.delete_table('mozspaces_keyword') # Deleting model", "\"''\", 'max_length': '100', 'blank': 'True'}), 'timezone': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'mozspaces.photo': {", "model 'Keyword' db.create_table('mozspaces_keyword', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('keyword', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)), ('mozspace', self.gf('django.db.models.fields.related.ForeignKey')(related_name='keywords', to=orm['mozspaces.MozSpace'])), ))", "{'default': \"''\", 'max_length': '100', 'blank': 'True'}), 'timezone': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'mozspaces.photo':", "Deleting model 'Photo' db.delete_table('mozspaces_photo') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id':", "'100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['contenttypes.ContentType']\"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField',", 
"to=orm['mozspaces.MozSpace'])), )) db.send_create_signal('mozspaces', ['Photo']) def backwards(self, orm): # Deleting model 'MozSpace' db.delete_table('mozspaces_mozspace') #", "}, 'mozspaces.mozspace': { 'Meta': {'object_name': 'MozSpace'}, 'address': ('django.db.models.fields.CharField', [], {'max_length': '300'}), 'city': ('django.db.models.fields.CharField',", "\"('content_type__app_label', 'content_type__model', 'codename')\", 'unique_together': \"(('content_type', 'codename'),)\", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),", "[], {'primary_key': 'True'}), 'lat': ('django.db.models.fields.FloatField', [], {}), 'lon': ('django.db.models.fields.FloatField', [], {}), 'name': ('django.db.models.fields.CharField',", "# Adding model 'Keyword' db.create_table('mozspaces_keyword', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('keyword', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)), ('mozspace', self.gf('django.db.models.fields.related.ForeignKey')(related_name='keywords',", "2, 11, 5, 41, 51, 842643)'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),", "[], {}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'phone': ('django.db.models.fields.CharField', [], {'default': \"''\", 'max_length':", "[], {'max_length': '100'}), 'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['auth.User']\"}), 'country': ('django.db.models.fields.CharField', [], {'max_length': '5'}),", "'country': ('django.db.models.fields.CharField', [], {'max_length': '5'}), 'cover_photo': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': \"'featured_mozspace'\", 'null':", "{'ordering': \"('name',)\", 'unique_together': \"(('app_label', 'model'),)\", 'object_name': 'ContentType', 'db_table': \"'django_content_type'\"}, 'app_label': 
('django.db.models.fields.CharField', [], {'max_length':", "{'primary_key': 'True'}), 'mozspace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'photos'\", 'to': \"orm['mozspaces.MozSpace']\"}), 'photofile': ('django.db.models.fields.files.ImageField', [], {'max_length':", "'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default':", "'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 11, 5, 41, 51, 842704)'}), 'email':", "[], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'mozspaces.keyword': { 'Meta': {'object_name':", "[], {'to': \"orm['auth.User']\"}), 'country': ('django.db.models.fields.CharField', [], {'max_length': '5'}), 'cover_photo': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True',", "('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default':", "'True', 'to': \"orm['mozspaces.Photo']\"}), 'email': ('django.db.models.fields.EmailField', [], {'default': \"''\", 'max_length': '75', 'blank': 'True'}), 'extra_text':", "('lon', self.gf('django.db.models.fields.FloatField')()), ('lat', self.gf('django.db.models.fields.FloatField')()), ('phone', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)), ('email', self.gf('django.db.models.fields.EmailField')(default='', max_length=75, blank=True)), ('coordinator',", "'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': \"('name',)\", 'unique_together': \"(('app_label', 'model'),)\", 'object_name':", "'True'}), 'region': ('django.db.models.fields.CharField', [], {'default': \"''\", 'max_length': '100', 'blank': 
'True'}), 'timezone': ('django.db.models.fields.CharField', [],", "('lat', self.gf('django.db.models.fields.FloatField')()), ('phone', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)), ('email', self.gf('django.db.models.fields.EmailField')(default='', max_length=75, blank=True)), ('coordinator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])), ('extra_text',", "backwards(self, orm): # Deleting model 'MozSpace' db.delete_table('mozspaces_mozspace') # Deleting model 'Keyword' db.delete_table('mozspaces_keyword') #", "'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Group']\",", "[], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),", "\"(('content_type', 'codename'),)\", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to':", "'100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField',", "2, 11, 5, 41, 51, 842704)'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),", "}, 'contenttypes.contenttype': { 'Meta': {'ordering': \"('name',)\", 'unique_together': \"(('app_label', 'model'),)\", 'object_name': 'ContentType', 'db_table': \"'django_content_type'\"},", "'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'mozspaces.keyword': {", "('<PASSWORD>.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': 
('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'}),", "'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [],", "Migration(SchemaMigration): def forwards(self, orm): # Adding model 'MozSpace' db.create_table('mozspaces_mozspace', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name',", "'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'lat': ('django.db.models.fields.FloatField', [], {}), 'lon': ('django.db.models.fields.FloatField',", "{'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Group']\", 'symmetrical': 'False', 'blank': 'True'}),", "{'max_length': '100'}), 'phone': ('django.db.models.fields.CharField', [], {'default': \"''\", 'max_length': '100', 'blank': 'True'}), 'region': ('django.db.models.fields.CharField',", "('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'photos'\", 'to': \"orm['mozspaces.MozSpace']\"}), 'photofile': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}) } }", "'Meta': {'object_name': 'Keyword'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'keyword': ('django.db.models.fields.CharField', [], {'unique': 'True',", "'to': \"orm['mozspaces.Photo']\"}), 'email': ('django.db.models.fields.EmailField', [], {'default': \"''\", 'max_length': '75', 'blank': 'True'}), 'extra_text': ('django.db.models.fields.TextField',", "models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'MozSpace' db.create_table('mozspaces_mozspace', ( ('id',", "[], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': 
('django.db.models.fields.BooleanField', [], {'default': 'False'})," ]
[ "print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"At the End of the Day", "print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"At the End of the Day : \\U0001F595 \\b Fuck You", "#replace '+' with '000' print(\"\\U0001F600\") print(\"\\U0001F603\") print(\"\\U0001F604\") print(\"\\U0001F601\") print(\"\\U0001F606\") print(\"\\U0001F605\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\")", "print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"At the End of the Day : \\U0001F595 \\b Fuck", "print(\"\\U0001F602\") print(\"At the End of the Day : \\U0001F595 \\b Fuck You !\")", "print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"At the", "print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"At the End", "print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"At the End of the Day : \\U0001F595 \\b", "print(\"\\U0001F605\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\")", "print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"At the End of the Day : \\U0001F595", "print(\"\\U0001F600\") print(\"\\U0001F603\") print(\"\\U0001F604\") print(\"\\U0001F601\") print(\"\\U0001F606\") 
print(\"\\U0001F605\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\")", "https://unicode.org/emoji/charts/full-emoji-list.html #replace '+' with '000' print(\"\\U0001F600\") print(\"\\U0001F603\") print(\"\\U0001F604\") print(\"\\U0001F601\") print(\"\\U0001F606\") print(\"\\U0001F605\") print(\"\\U0001F602\") print(\"\\U0001F602\")", "print(\"\\U0001F603\") print(\"\\U0001F604\") print(\"\\U0001F601\") print(\"\\U0001F606\") print(\"\\U0001F605\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\")", "print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"At the End of", "print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"At the End of the Day :", ": https://unicode.org/emoji/charts/full-emoji-list.html #replace '+' with '000' print(\"\\U0001F600\") print(\"\\U0001F603\") print(\"\\U0001F604\") print(\"\\U0001F601\") print(\"\\U0001F606\") print(\"\\U0001F605\") print(\"\\U0001F602\")", "'+' with '000' print(\"\\U0001F600\") print(\"\\U0001F603\") print(\"\\U0001F604\") print(\"\\U0001F601\") print(\"\\U0001F606\") print(\"\\U0001F605\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\")", "print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"At", "print(\"\\U0001F602\") print(\"\\U0001F602\") 
print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\")", "print(\"\\U0001F606\") print(\"\\U0001F605\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\")", "print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"At the End of the", "print(\"\\U0001F604\") print(\"\\U0001F601\") print(\"\\U0001F606\") print(\"\\U0001F605\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\")", "with '000' print(\"\\U0001F600\") print(\"\\U0001F603\") print(\"\\U0001F604\") print(\"\\U0001F601\") print(\"\\U0001F606\") print(\"\\U0001F605\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\")", "'000' print(\"\\U0001F600\") print(\"\\U0001F603\") print(\"\\U0001F604\") print(\"\\U0001F601\") print(\"\\U0001F606\") print(\"\\U0001F605\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\")", "#website : https://unicode.org/emoji/charts/full-emoji-list.html #replace '+' with '000' print(\"\\U0001F600\") print(\"\\U0001F603\") print(\"\\U0001F604\") print(\"\\U0001F601\") print(\"\\U0001F606\") print(\"\\U0001F605\")", "print(\"\\U0001F601\") print(\"\\U0001F606\") print(\"\\U0001F605\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") 
print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\") print(\"\\U0001F602\")" ]
[ "License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS", "writing, software # distributed under the License is distributed on an \"AS IS\"", "Unless required by applicable law or agreed to in writing, software # distributed", "context = utils.get_fake_context(ovn_nb={}) nb_context = ovn_nb.OvnNorthboundContext(context) nb_context.setup() expected_setup_output = ovn_nbctl_show_output actual_setup_output = nb_context.context[\"ovn-nb\"]", "See the # License for the specific language governing permissions and limitations #", "\"License\"); you may # not use this file except in compliance with the", "from rally_ovs.tests.unit.plugins.ovs import utils from tests.unit import test class OvnNorthboundContextTestCase(test.TestCase): @mock.patch(\"rally_ovs.plugins.ovs.ovsclients_impl.OvnNbctl.create_client\") def test_setup(self,", "switch 7f55c582-c007-4fba-810d-a14ead480851 (lswitch_c52f4c_Rv0Jcj) port lport_c52f4c_cm8SIf port lport_c52f4c_8h7hn2 switch 9fea76cf-d73e-4dc8-a2a3-1e98b9d8eab0 (lswitch_c52f4c_T0m6Ce) port lport_c52f4c_X3px3u port", "Apache License, Version 2.0 (the \"License\"); you may # not use this file", "ovn_nbctl_show_output context = utils.get_fake_context(ovn_nb={}) nb_context = ovn_nb.OvnNorthboundContext(context) nb_context.setup() expected_setup_output = ovn_nbctl_show_output actual_setup_output =", "the License. 
You may obtain # a copy of the License at #", "ovn_nb from rally_ovs.tests.unit.plugins.ovs import utils from tests.unit import test class OvnNorthboundContextTestCase(test.TestCase): @mock.patch(\"rally_ovs.plugins.ovs.ovsclients_impl.OvnNbctl.create_client\") def", "law or agreed to in writing, software # distributed under the License is", "tests.unit import test class OvnNorthboundContextTestCase(test.TestCase): @mock.patch(\"rally_ovs.plugins.ovs.ovsclients_impl.OvnNbctl.create_client\") def test_setup(self, mock_create_client): ovn_nbctl_show_output = \"\"\"\\ switch", "may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the Apache License, Version 2.0 (the \"License\"); you may # not use this", "limitations # under the License. import mock from rally_ovs.plugins.ovs.context import ovn_nb from rally_ovs.tests.unit.plugins.ovs", "under the License. import mock from rally_ovs.plugins.ovs.context import ovn_nb from rally_ovs.tests.unit.plugins.ovs import utils", "port lport_c52f4c_cm8SIf port lport_c52f4c_8h7hn2 switch 9fea76cf-d73e-4dc8-a2a3-1e98b9d8eab0 (lswitch_c52f4c_T0m6Ce) port lport_c52f4c_X3px3u port lport_c52f4c_92dhqb \"\"\" mock_client", "express or implied. See the # License for the specific language governing permissions", "lport_c52f4c_X3px3u port lport_c52f4c_92dhqb \"\"\" mock_client = mock_create_client.return_value mock_client.show.return_value = ovn_nbctl_show_output context = utils.get_fake_context(ovn_nb={})", "an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either", "# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "CONDITIONS OF ANY KIND, either express or implied. See the # License for", "not use this file except in compliance with the License. You may obtain", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "with the License. You may obtain # a copy of the License at", "# under the License. 
import mock from rally_ovs.plugins.ovs.context import ovn_nb from rally_ovs.tests.unit.plugins.ovs import", "for the specific language governing permissions and limitations # under the License. import", "Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the \"License\");", "Copyright 2018 Red Hat, Inc. # # Licensed under the Apache License, Version", "Licensed under the Apache License, Version 2.0 (the \"License\"); you may # not", "from tests.unit import test class OvnNorthboundContextTestCase(test.TestCase): @mock.patch(\"rally_ovs.plugins.ovs.ovsclients_impl.OvnNbctl.create_client\") def test_setup(self, mock_create_client): ovn_nbctl_show_output = \"\"\"\\", "port lport_c52f4c_dkZSDg switch 7f55c582-c007-4fba-810d-a14ead480851 (lswitch_c52f4c_Rv0Jcj) port lport_c52f4c_cm8SIf port lport_c52f4c_8h7hn2 switch 9fea76cf-d73e-4dc8-a2a3-1e98b9d8eab0 (lswitch_c52f4c_T0m6Ce) port", "License for the specific language governing permissions and limitations # under the License.", "= ovn_nbctl_show_output context = utils.get_fake_context(ovn_nb={}) nb_context = ovn_nb.OvnNorthboundContext(context) nb_context.setup() expected_setup_output = ovn_nbctl_show_output actual_setup_output", "2.0 (the \"License\"); you may # not use this file except in compliance", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "language governing permissions and limitations # under the License. import mock from rally_ovs.plugins.ovs.context", "permissions and limitations # under the License. 
import mock from rally_ovs.plugins.ovs.context import ovn_nb", "lport_c52f4c_cm8SIf port lport_c52f4c_8h7hn2 switch 9fea76cf-d73e-4dc8-a2a3-1e98b9d8eab0 (lswitch_c52f4c_T0m6Ce) port lport_c52f4c_X3px3u port lport_c52f4c_92dhqb \"\"\" mock_client =", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "rally_ovs.tests.unit.plugins.ovs import utils from tests.unit import test class OvnNorthboundContextTestCase(test.TestCase): @mock.patch(\"rally_ovs.plugins.ovs.ovsclients_impl.OvnNbctl.create_client\") def test_setup(self, mock_create_client):", "use this file except in compliance with the License. You may obtain #", "# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #", "compliance with the License. You may obtain # a copy of the License", "License, Version 2.0 (the \"License\"); you may # not use this file except", "BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF", "lport_c52f4c_dkZSDg switch 7f55c582-c007-4fba-810d-a14ead480851 (lswitch_c52f4c_Rv0Jcj) port lport_c52f4c_cm8SIf port lport_c52f4c_8h7hn2 switch 9fea76cf-d73e-4dc8-a2a3-1e98b9d8eab0 (lswitch_c52f4c_T0m6Ce) port lport_c52f4c_X3px3u", "specific language governing permissions and limitations # under the License. import mock from", "test_setup(self, mock_create_client): ovn_nbctl_show_output = \"\"\"\\ switch 48732e5d-b018-4bad-a1b6-8dbc762f4126 (lswitch_c52f4c_xFG42O) port lport_c52f4c_LXzXCE port lport_c52f4c_dkZSDg switch", "IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "implied. See the # License for the specific language governing permissions and limitations", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "OF ANY KIND, either express or implied. 
See the # License for the", "the License. import mock from rally_ovs.plugins.ovs.context import ovn_nb from rally_ovs.tests.unit.plugins.ovs import utils from", "port lport_c52f4c_8h7hn2 switch 9fea76cf-d73e-4dc8-a2a3-1e98b9d8eab0 (lswitch_c52f4c_T0m6Ce) port lport_c52f4c_X3px3u port lport_c52f4c_92dhqb \"\"\" mock_client = mock_create_client.return_value", "rally_ovs.plugins.ovs.context import ovn_nb from rally_ovs.tests.unit.plugins.ovs import utils from tests.unit import test class OvnNorthboundContextTestCase(test.TestCase):", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "from rally_ovs.plugins.ovs.context import ovn_nb from rally_ovs.tests.unit.plugins.ovs import utils from tests.unit import test class", "\"\"\" mock_client = mock_create_client.return_value mock_client.show.return_value = ovn_nbctl_show_output context = utils.get_fake_context(ovn_nb={}) nb_context = ovn_nb.OvnNorthboundContext(context)", "utils from tests.unit import test class OvnNorthboundContextTestCase(test.TestCase): @mock.patch(\"rally_ovs.plugins.ovs.ovsclients_impl.OvnNbctl.create_client\") def test_setup(self, mock_create_client): ovn_nbctl_show_output =", "(lswitch_c52f4c_T0m6Ce) port lport_c52f4c_X3px3u port lport_c52f4c_92dhqb \"\"\" mock_client = mock_create_client.return_value mock_client.show.return_value = ovn_nbctl_show_output context", "# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the", "port lport_c52f4c_X3px3u port lport_c52f4c_92dhqb \"\"\" mock_client = mock_create_client.return_value mock_client.show.return_value = ovn_nbctl_show_output context =", "\"\"\"\\ switch 48732e5d-b018-4bad-a1b6-8dbc762f4126 (lswitch_c52f4c_xFG42O) port lport_c52f4c_LXzXCE port lport_c52f4c_dkZSDg switch 7f55c582-c007-4fba-810d-a14ead480851 (lswitch_c52f4c_Rv0Jcj) port lport_c52f4c_cm8SIf", "port lport_c52f4c_LXzXCE port lport_c52f4c_dkZSDg switch 7f55c582-c007-4fba-810d-a14ead480851 (lswitch_c52f4c_Rv0Jcj) port lport_c52f4c_cm8SIf port lport_c52f4c_8h7hn2 switch 9fea76cf-d73e-4dc8-a2a3-1e98b9d8eab0", "you may # not use this file except in compliance with the License.", "ovn_nbctl_show_output = \"\"\"\\ switch 48732e5d-b018-4bad-a1b6-8dbc762f4126 (lswitch_c52f4c_xFG42O) port lport_c52f4c_LXzXCE port lport_c52f4c_dkZSDg switch 7f55c582-c007-4fba-810d-a14ead480851 (lswitch_c52f4c_Rv0Jcj)", "port lport_c52f4c_92dhqb \"\"\" mock_client = mock_create_client.return_value mock_client.show.return_value = ovn_nbctl_show_output context = utils.get_fake_context(ovn_nb={}) nb_context", "agreed to in writing, software # distributed under the License is distributed on", "48732e5d-b018-4bad-a1b6-8dbc762f4126 (lswitch_c52f4c_xFG42O) port lport_c52f4c_LXzXCE port lport_c52f4c_dkZSDg switch 7f55c582-c007-4fba-810d-a14ead480851 (lswitch_c52f4c_Rv0Jcj) port lport_c52f4c_cm8SIf port lport_c52f4c_8h7hn2", "(the \"License\"); you may # not use this file except in compliance with", "KIND, either express or implied. See the # License for the specific language", "may # not use this file except in compliance with the License. You", "<reponame>LorenzoBianconi/ovn-scale-test # Copyright 2018 Red Hat, Inc. # # Licensed under the Apache", "either express or implied. 
See the # License for the specific language governing", "def test_setup(self, mock_create_client): ovn_nbctl_show_output = \"\"\"\\ switch 48732e5d-b018-4bad-a1b6-8dbc762f4126 (lswitch_c52f4c_xFG42O) port lport_c52f4c_LXzXCE port lport_c52f4c_dkZSDg", "# # Unless required by applicable law or agreed to in writing, software", "7f55c582-c007-4fba-810d-a14ead480851 (lswitch_c52f4c_Rv0Jcj) port lport_c52f4c_cm8SIf port lport_c52f4c_8h7hn2 switch 9fea76cf-d73e-4dc8-a2a3-1e98b9d8eab0 (lswitch_c52f4c_T0m6Ce) port lport_c52f4c_X3px3u port lport_c52f4c_92dhqb", "file except in compliance with the License. You may obtain # a copy", "mock from rally_ovs.plugins.ovs.context import ovn_nb from rally_ovs.tests.unit.plugins.ovs import utils from tests.unit import test", "this file except in compliance with the License. You may obtain # a", "# Unless required by applicable law or agreed to in writing, software #", "2018 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0", "by applicable law or agreed to in writing, software # distributed under the", "\"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express", "# Copyright 2018 Red Hat, Inc. # # Licensed under the Apache License,", "under the License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "or implied. See the # License for the specific language governing permissions and", "License. 
import mock from rally_ovs.plugins.ovs.context import ovn_nb from rally_ovs.tests.unit.plugins.ovs import utils from tests.unit", "mock_create_client.return_value mock_client.show.return_value = ovn_nbctl_show_output context = utils.get_fake_context(ovn_nb={}) nb_context = ovn_nb.OvnNorthboundContext(context) nb_context.setup() expected_setup_output =", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "import mock from rally_ovs.plugins.ovs.context import ovn_nb from rally_ovs.tests.unit.plugins.ovs import utils from tests.unit import", "lport_c52f4c_92dhqb \"\"\" mock_client = mock_create_client.return_value mock_client.show.return_value = ovn_nbctl_show_output context = utils.get_fake_context(ovn_nb={}) nb_context =", "governing permissions and limitations # under the License. import mock from rally_ovs.plugins.ovs.context import", "License. You may obtain # a copy of the License at # #", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "the License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR", "switch 9fea76cf-d73e-4dc8-a2a3-1e98b9d8eab0 (lswitch_c52f4c_T0m6Ce) port lport_c52f4c_X3px3u port lport_c52f4c_92dhqb \"\"\" mock_client = mock_create_client.return_value mock_client.show.return_value =", "mock_client = mock_create_client.return_value mock_client.show.return_value = ovn_nbctl_show_output context = utils.get_fake_context(ovn_nb={}) nb_context = ovn_nb.OvnNorthboundContext(context) nb_context.setup()", "distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY", "@mock.patch(\"rally_ovs.plugins.ovs.ovsclients_impl.OvnNbctl.create_client\") def test_setup(self, mock_create_client): ovn_nbctl_show_output = \"\"\"\\ switch 48732e5d-b018-4bad-a1b6-8dbc762f4126 (lswitch_c52f4c_xFG42O) port lport_c52f4c_LXzXCE port", "mock_create_client): ovn_nbctl_show_output = \"\"\"\\ switch 48732e5d-b018-4bad-a1b6-8dbc762f4126 (lswitch_c52f4c_xFG42O) port 
lport_c52f4c_LXzXCE port lport_c52f4c_dkZSDg switch 7f55c582-c007-4fba-810d-a14ead480851", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may", "on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND,", "ANY KIND, either express or implied. See the # License for the specific", "the # License for the specific language governing permissions and limitations # under", "except in compliance with the License. You may obtain # a copy of", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "= utils.get_fake_context(ovn_nb={}) nb_context = ovn_nb.OvnNorthboundContext(context) nb_context.setup() expected_setup_output = ovn_nbctl_show_output actual_setup_output = nb_context.context[\"ovn-nb\"] self.assertEqual(expected_setup_output,", "(lswitch_c52f4c_xFG42O) port lport_c52f4c_LXzXCE port lport_c52f4c_dkZSDg switch 7f55c582-c007-4fba-810d-a14ead480851 (lswitch_c52f4c_Rv0Jcj) port lport_c52f4c_cm8SIf port lport_c52f4c_8h7hn2 switch", "(lswitch_c52f4c_Rv0Jcj) port lport_c52f4c_cm8SIf port lport_c52f4c_8h7hn2 switch 9fea76cf-d73e-4dc8-a2a3-1e98b9d8eab0 (lswitch_c52f4c_T0m6Ce) port lport_c52f4c_X3px3u port lport_c52f4c_92dhqb \"\"\"", "to in writing, software # distributed under the License is distributed on an", "lport_c52f4c_LXzXCE port lport_c52f4c_dkZSDg switch 7f55c582-c007-4fba-810d-a14ead480851 (lswitch_c52f4c_Rv0Jcj) port lport_c52f4c_cm8SIf port lport_c52f4c_8h7hn2 switch 9fea76cf-d73e-4dc8-a2a3-1e98b9d8eab0 (lswitch_c52f4c_T0m6Ce)", "You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "the specific language governing permissions and limitations # under the License. 
import mock", "import test class OvnNorthboundContextTestCase(test.TestCase): @mock.patch(\"rally_ovs.plugins.ovs.ovsclients_impl.OvnNbctl.create_client\") def test_setup(self, mock_create_client): ovn_nbctl_show_output = \"\"\"\\ switch 48732e5d-b018-4bad-a1b6-8dbc762f4126", "and limitations # under the License. import mock from rally_ovs.plugins.ovs.context import ovn_nb from", "required by applicable law or agreed to in writing, software # distributed under", "mock_client.show.return_value = ovn_nbctl_show_output context = utils.get_fake_context(ovn_nb={}) nb_context = ovn_nb.OvnNorthboundContext(context) nb_context.setup() expected_setup_output = ovn_nbctl_show_output", "applicable law or agreed to in writing, software # distributed under the License", "import ovn_nb from rally_ovs.tests.unit.plugins.ovs import utils from tests.unit import test class OvnNorthboundContextTestCase(test.TestCase): @mock.patch(\"rally_ovs.plugins.ovs.ovsclients_impl.OvnNbctl.create_client\")", "distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT #", "lport_c52f4c_8h7hn2 switch 9fea76cf-d73e-4dc8-a2a3-1e98b9d8eab0 (lswitch_c52f4c_T0m6Ce) port lport_c52f4c_X3px3u port lport_c52f4c_92dhqb \"\"\" mock_client = mock_create_client.return_value mock_client.show.return_value", "OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License", "obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "= \"\"\"\\ switch 48732e5d-b018-4bad-a1b6-8dbc762f4126 (lswitch_c52f4c_xFG42O) port lport_c52f4c_LXzXCE port lport_c52f4c_dkZSDg switch 7f55c582-c007-4fba-810d-a14ead480851 (lswitch_c52f4c_Rv0Jcj) port", "= mock_create_client.return_value mock_client.show.return_value = ovn_nbctl_show_output context = utils.get_fake_context(ovn_nb={}) nb_context = ovn_nb.OvnNorthboundContext(context) nb_context.setup() expected_setup_output", "test class OvnNorthboundContextTestCase(test.TestCase): @mock.patch(\"rally_ovs.plugins.ovs.ovsclients_impl.OvnNbctl.create_client\") def test_setup(self, mock_create_client): ovn_nbctl_show_output = \"\"\"\\ switch 48732e5d-b018-4bad-a1b6-8dbc762f4126 (lswitch_c52f4c_xFG42O)", "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may #", "in compliance with the License. You may obtain # a copy of the", "OvnNorthboundContextTestCase(test.TestCase): @mock.patch(\"rally_ovs.plugins.ovs.ovsclients_impl.OvnNbctl.create_client\") def test_setup(self, mock_create_client): ovn_nbctl_show_output = \"\"\"\\ switch 48732e5d-b018-4bad-a1b6-8dbc762f4126 (lswitch_c52f4c_xFG42O) port lport_c52f4c_LXzXCE", "# not use this file except in compliance with the License. 
You may", "class OvnNorthboundContextTestCase(test.TestCase): @mock.patch(\"rally_ovs.plugins.ovs.ovsclients_impl.OvnNbctl.create_client\") def test_setup(self, mock_create_client): ovn_nbctl_show_output = \"\"\"\\ switch 48732e5d-b018-4bad-a1b6-8dbc762f4126 (lswitch_c52f4c_xFG42O) port", "or agreed to in writing, software # distributed under the License is distributed", "import utils from tests.unit import test class OvnNorthboundContextTestCase(test.TestCase): @mock.patch(\"rally_ovs.plugins.ovs.ovsclients_impl.OvnNbctl.create_client\") def test_setup(self, mock_create_client): ovn_nbctl_show_output", "# License for the specific language governing permissions and limitations # under the", "under the Apache License, Version 2.0 (the \"License\"); you may # not use", "WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See", "9fea76cf-d73e-4dc8-a2a3-1e98b9d8eab0 (lswitch_c52f4c_T0m6Ce) port lport_c52f4c_X3px3u port lport_c52f4c_92dhqb \"\"\" mock_client = mock_create_client.return_value mock_client.show.return_value = ovn_nbctl_show_output", "Inc. # # Licensed under the Apache License, Version 2.0 (the \"License\"); you", "switch 48732e5d-b018-4bad-a1b6-8dbc762f4126 (lswitch_c52f4c_xFG42O) port lport_c52f4c_LXzXCE port lport_c52f4c_dkZSDg switch 7f55c582-c007-4fba-810d-a14ead480851 (lswitch_c52f4c_Rv0Jcj) port lport_c52f4c_cm8SIf port", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "utils.get_fake_context(ovn_nb={}) nb_context = ovn_nb.OvnNorthboundContext(context) nb_context.setup() expected_setup_output = ovn_nbctl_show_output actual_setup_output = nb_context.context[\"ovn-nb\"] self.assertEqual(expected_setup_output, actual_setup_output)", "Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the", "Version 2.0 (the \"License\"); you may # not use this file except in" ]
[ "Pools.\"\"\" from .aio_fixed_task_pool_simple import AioFixedTaskPoolSimple from .aio_fixed_task_pool_lifo import AioFixedTaskPoolLifo from .aio_fixed_task_pool_priority import AioFixedTaskPoolPriority", "from .aio_fixed_task_pool_lifo import AioFixedTaskPoolLifo from .aio_fixed_task_pool_priority import AioFixedTaskPoolPriority from .aio_autoscale_task_pool_simple import AioAutoScaleTaskPoolSimple from", "import AioFixedTaskPoolPriority from .aio_autoscale_task_pool_simple import AioAutoScaleTaskPoolSimple from .aio_autoscale_task_pool_lifo import AioAutoScaleTaskPoolLifo from .aio_autoscale_task_pool_priority import", "AioFixedTaskPoolPriority from .aio_autoscale_task_pool_simple import AioAutoScaleTaskPoolSimple from .aio_autoscale_task_pool_lifo import AioAutoScaleTaskPoolLifo from .aio_autoscale_task_pool_priority import AioAutoScaleTaskPoolPriority", "import AioFixedTaskPoolSimple from .aio_fixed_task_pool_lifo import AioFixedTaskPoolLifo from .aio_fixed_task_pool_priority import AioFixedTaskPoolPriority from .aio_autoscale_task_pool_simple import", "AioFixedTaskPoolSimple from .aio_fixed_task_pool_lifo import AioFixedTaskPoolLifo from .aio_fixed_task_pool_priority import AioFixedTaskPoolPriority from .aio_autoscale_task_pool_simple import AioAutoScaleTaskPoolSimple", "AioFixedTaskPoolLifo from .aio_fixed_task_pool_priority import AioFixedTaskPoolPriority from .aio_autoscale_task_pool_simple import AioAutoScaleTaskPoolSimple from .aio_autoscale_task_pool_lifo import AioAutoScaleTaskPoolLifo", "from .aio_fixed_task_pool_priority import AioFixedTaskPoolPriority from .aio_autoscale_task_pool_simple import AioAutoScaleTaskPoolSimple from .aio_autoscale_task_pool_lifo import AioAutoScaleTaskPoolLifo from", "Supported Task Pools.\"\"\" from .aio_fixed_task_pool_simple import AioFixedTaskPoolSimple from .aio_fixed_task_pool_lifo import AioFixedTaskPoolLifo from .aio_fixed_task_pool_priority", "Task Pools.\"\"\" from .aio_fixed_task_pool_simple import 
AioFixedTaskPoolSimple from .aio_fixed_task_pool_lifo import AioFixedTaskPoolLifo from .aio_fixed_task_pool_priority import", "from .aio_fixed_task_pool_simple import AioFixedTaskPoolSimple from .aio_fixed_task_pool_lifo import AioFixedTaskPoolLifo from .aio_fixed_task_pool_priority import AioFixedTaskPoolPriority from", "\"\"\"All Supported Task Pools.\"\"\" from .aio_fixed_task_pool_simple import AioFixedTaskPoolSimple from .aio_fixed_task_pool_lifo import AioFixedTaskPoolLifo from", ".aio_fixed_task_pool_priority import AioFixedTaskPoolPriority from .aio_autoscale_task_pool_simple import AioAutoScaleTaskPoolSimple from .aio_autoscale_task_pool_lifo import AioAutoScaleTaskPoolLifo from .aio_autoscale_task_pool_priority", ".aio_fixed_task_pool_lifo import AioFixedTaskPoolLifo from .aio_fixed_task_pool_priority import AioFixedTaskPoolPriority from .aio_autoscale_task_pool_simple import AioAutoScaleTaskPoolSimple from .aio_autoscale_task_pool_lifo", ".aio_fixed_task_pool_simple import AioFixedTaskPoolSimple from .aio_fixed_task_pool_lifo import AioFixedTaskPoolLifo from .aio_fixed_task_pool_priority import AioFixedTaskPoolPriority from .aio_autoscale_task_pool_simple", "import AioFixedTaskPoolLifo from .aio_fixed_task_pool_priority import AioFixedTaskPoolPriority from .aio_autoscale_task_pool_simple import AioAutoScaleTaskPoolSimple from .aio_autoscale_task_pool_lifo import" ]